Example #1
 def test_order_by_group_by(self):
     self.connection.execute("INSERT INTO test VALUES (100, 'Title 10')")
     self.connection.execute("INSERT INTO test VALUES (101, 'Title 10')")
     id = Column("id", "test")
     title = Column("title", "test")
     expr = Select(Count(id), group_by=title, order_by=Count(id))
     result = self.connection.execute(expr)
     self.assertEquals(result.get_all(), [(1, ), (3, )])
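For reference, the expression built above compiles to `SELECT COUNT(id) FROM test GROUP BY title ORDER BY COUNT(id)`. A minimal self-contained sketch of the same pattern against an in-memory SQLite database (table contents here are made up, not taken from the test suite):

    from storm.expr import Column, Count, Select
    from storm.locals import create_database, Store

    store = Store(create_database("sqlite:"))
    store.execute("CREATE TABLE test (id INTEGER, title VARCHAR)")
    for row in [(1, u"a"), (2, u"b"), (3, u"b")]:
        store.execute("INSERT INTO test VALUES (?, ?)", row)

    id_col = Column("id", "test")
    title_col = Column("title", "test")
    # SELECT COUNT(id) FROM test GROUP BY title ORDER BY COUNT(id)
    expr = Select(Count(id_col), group_by=title_col, order_by=Count(id_col))
    print(store.execute(expr).get_all())  # [(1,), (2,)]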
Example #2
    def findBinariesForDomination(self, distroarchseries, pocket):
        """Find binary publications that need dominating.

        This is only for traditional domination, where the latest published
        publication is always kept published.  It will ignore publications
        that have no other publications competing for the same binary package.
        """
        BPPH = BinaryPackagePublishingHistory
        BPR = BinaryPackageRelease

        bpph_location_clauses = [
            BPPH.status == PackagePublishingStatus.PUBLISHED,
            BPPH.distroarchseries == distroarchseries,
            BPPH.archive == self.archive,
            BPPH.pocket == pocket,
        ]
        candidate_binary_names = Select(BPPH.binarypackagenameID,
                                        And(*bpph_location_clauses),
                                        group_by=BPPH.binarypackagenameID,
                                        having=(Count() > 1))
        main_clauses = bpph_location_clauses + [
            BPR.id == BPPH.binarypackagereleaseID,
            BPR.binarypackagenameID.is_in(candidate_binary_names),
            BPR.binpackageformat != BinaryPackageFormat.DDEB,
        ]

        # We're going to access the BPRs as well.  Since we make the
        # database look them up anyway, and since there won't be many
        # duplications among them, load them alongside the publications.
        # We'll also want their BinaryPackageNames, but adding those to
        # the join would complicate the query.
        query = IStore(BPPH).find((BPPH, BPR), *main_clauses)
        bpphs = list(DecoratedResultSet(query, itemgetter(0)))
        load_related(BinaryPackageName, bpphs, ['binarypackagenameID'])
        return bpphs
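The `having=(Count() > 1)` subselect is the heart of this method: it restricts domination work to binary package names that actually have competing publications. A reduced, self-contained sketch of the same shape (table and column names are illustrative):

    from storm.expr import Column, Count, Select
    from storm.locals import create_database, Store

    store = Store(create_database("sqlite:"))
    store.execute("CREATE TABLE pub (id INTEGER, name_id INTEGER)")
    for row in [(1, 7), (2, 7), (3, 8)]:
        store.execute("INSERT INTO pub VALUES (?, ?)", row)

    name_id = Column("name_id", "pub")
    # Inner query: name_ids that occur more than once.
    candidates = Select(name_id, group_by=name_id, having=(Count() > 1))
    # Outer query: only rows whose name_id has competition (here: 7).
    result = store.execute(
        Select(Column("id", "pub"), name_id.is_in(candidates)))
    print(sorted(result.get_all()))  # [(1,), (2,)]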
Example #3
    def findSourcesForDomination(self, distroseries, pocket):
        """Find binary publications that need dominating.

        This is only for traditional domination, where the latest published
        publication is always kept published.  See `find_live_source_versions`
        for this logic.

        To optimize for that logic, `findSourcesForDomination` will ignore
        publications that have no other publications competing for the same
        source package.  There'd be nothing to do for those cases.
        """
        SPPH = SourcePackagePublishingHistory
        SPR = SourcePackageRelease

        spph_location_clauses = self._composeActiveSourcePubsCondition(
            distroseries, pocket)
        candidate_source_names = Select(SPPH.sourcepackagenameID,
                                        And(join_spph_spr(),
                                            spph_location_clauses),
                                        group_by=SPPH.sourcepackagenameID,
                                        having=(Count() > 1))

        # We'll also access the SourcePackageReleases associated with
        # the publications we find.  Since they're in the join anyway,
        # load them alongside the publications.
        # Actually we'll also want the SourcePackageNames, but adding
        # those to the (outer) query would complicate it, and
        # potentially slow it down.
        query = IStore(SPPH).find(
            (SPPH, SPR), join_spph_spr(),
            SPPH.sourcepackagenameID.is_in(candidate_source_names),
            spph_location_clauses)
        spphs = DecoratedResultSet(query, itemgetter(0))
        load_related(SourcePackageName, spphs, ['sourcepackagenameID'])
        return spphs
Example #4
    def get_top_participants(self, list_name, start, end, limit=None):
        """ Return all the participants between two given dates.

        :param list_name: The name of the mailing list in which the
            participants should be searched.
        :param start: A datetime object representing the starting date of
            the interval to query.
        :param end: A datetime object representing the ending date of
            the interval to query.
        :param limit: Limit the number of participants to return. If None or
            not supplied, return them all.
        :returns: The list of participants with their message counts.
        """
        number = Alias(Count(Email.sender_email), "number")
        part = self.db.find(
            (Email.sender_name, Email.sender_email, number),
            And(
                Email.list_name == unicode(list_name),
                Email.date >= start,
                Email.date < end,
            )).group_by(Email.sender_email,
                        Email.sender_name).order_by(Desc(number))
        if limit is not None:
            part = part.config(limit=limit)
        return list(part)
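Two details make the top-N shape work: the aggregate is wrapped in an `Alias` so `order_by(Desc(number))` can refer to it by name, and `config(limit=...)` caps the result only when a limit was requested. A standalone sketch of the same pattern (illustrative table and data):

    from storm.expr import Alias, Column, Count, Desc, Select
    from storm.locals import create_database, Store

    store = Store(create_database("sqlite:"))
    store.execute("CREATE TABLE email (sender VARCHAR)")
    for sender in (u"a", u"b", u"b", u"c", u"b"):
        store.execute("INSERT INTO email VALUES (?)", (sender,))

    sender = Column("sender", "email")
    number = Alias(Count(sender), "number")
    expr = Select((sender, number), group_by=sender,
                  order_by=Desc(number), limit=2)
    # [(u'b', 3), (u'a', 1)] -- the tie between 'a' and 'c' is
    # broken arbitrarily by the backend.
    print(store.execute(expr).get_all())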
Example #5
 def getProductsWithTemplates(self):
     """Get Product.ids for projects with any translations templates."""
     return self.store.find(
         Product,
         POTemplate.productseriesID == ProductSeries.id,
         ProductSeries.productID == Product.id,
     ).group_by(Product).having(Count(POTemplate.id) > 0)
Example #6
 def ownerCounts(self):
     """See `IGitCollection`."""
     is_team = Person.teamowner != None
     owners = self._getRepositorySelect((GitRepository.owner_id, ))
     counts = dict(
         self.store.find((is_team, Count(Person.id)),
                         Person.id.is_in(owners)).group_by(is_team))
     return (counts.get(False, 0), counts.get(True, 0))
Example #7
 def ownerCounts(self):
     """See `IBranchCollection`."""
     is_team = Person.teamowner != None
     branch_owners = self._getBranchSelect((Branch.ownerID, ))
     counts = dict(
         self.store.find((is_team, Count(Person.id)),
                         Person.id.is_in(branch_owners)).group_by(is_team))
     return (counts.get(False, 0), counts.get(True, 0))
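Both `ownerCounts` variants group on a computed boolean (`Person.teamowner != None`, spelled with `!=` because Storm turns it into an `IS NOT NULL` expression), so a single query returns at most two `(flag, count)` rows and `dict()` turns them into a lookup table. A self-contained sketch of that trick (illustrative table):

    from storm.expr import Column, Count, Select
    from storm.locals import create_database, Store

    store = Store(create_database("sqlite:"))
    store.execute("CREATE TABLE person (id INTEGER, teamowner INTEGER)")
    for row in [(1, None), (2, None), (3, 99)]:
        store.execute("INSERT INTO person VALUES (?, ?)", row)

    is_team = Column("teamowner", "person") != None  # IS NOT NULL
    counts = dict(store.execute(
        Select((is_team, Count()), group_by=is_team)).get_all())
    # SQLite returns the flag as 0/1, which compare equal to False/True.
    print((counts.get(False, 0), counts.get(True, 0)))  # (2, 1)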
Example #8
    def completed_laps(self):
        if hasattr(self, '_complete_laps') and self._complete_laps is not None:
            return self._complete_laps

        store = Store.of(self)
        query = And(Racer.id == RacerLap.racer_id,
                    Racer.category_id == self.id)
        data = store.using(RacerLap, Racer).find((Count(1)), query)
        data = data.group_by(Racer.category_id, Racer.id)
        self._complete_laps = max(list(data) or [0])
        return self._complete_laps
Example #9
 def getAccessPolicyGrantCounts(self, pillar):
     """See `ISharingService`."""
     policies = getUtility(IAccessPolicySource).findByPillar([pillar])
     ids = [policy.id for policy in policies]
     store = IStore(AccessPolicyGrant)
     count_select = Select(
         (Count(), ),
         tables=(AccessPolicyGrant, ),
         where=AccessPolicyGrant.policy == AccessPolicy.id)
     return store.find((AccessPolicy.type, ColumnSelect(count_select)),
                       AccessPolicy.id.is_in(ids))
Example #10
    def numOwnersOfDevice(
        self, bus=None, vendor_id=None, product_id=None, driver_name=None,
        package_name=None, distro_target=None):
        """See `IHWSubmissionSet`."""
        store = IStore(HWSubmission)
        submitters_with_device_select, all_submitters_select = (
            self._submissionsSubmitterSelects(
                HWSubmission.raw_emailaddress, bus, vendor_id, product_id,
                driver_name, package_name, distro_target))

        submitters_with_device = store.execute(
            Select(
                columns=[Count()],
                tables=[Alias(submitters_with_device_select, 'addresses')]))
        all_submitters = store.execute(
            Select(
                columns=[Count()],
                tables=[Alias(all_submitters_select, 'addresses')]))

        return (submitters_with_device.get_one()[0],
                all_submitters.get_one()[0])
Example #11
 def getStatusCountsForProductSeries(self, product_series):
     """See `ISpecificationSet`."""
     # Find specs targeted to the series or a milestone in the
     # series. The milestone set is materialised client-side to
     # get a good plan for the specification query.
     return list(IStore(Specification).find(
         (Specification.implementation_status, Count()),
         Or(
             Specification.productseries == product_series,
             Specification.milestoneID.is_in(list(
                 product_series.all_milestones.values(Milestone.id)))))
         .group_by(Specification.implementation_status))
Example #12
 def preload_translators_count(languages):
     from lp.registry.model.person import PersonLanguage
     ids = set(language.id
               for language in languages).difference(set([None]))
     counts = IStore(Language).using(
         LeftJoin(Language, self._getTranslatorJoins(),
                  PersonLanguage.languageID == Language.id)).find(
         (Language, Count(PersonLanguage)),
         Language.id.is_in(ids)).group_by(Language)
     for language, count in counts:
         get_property_cache(language).translators_count = count
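The `LeftJoin` matters here: with an inner join, languages that have no `PersonLanguage` rows would vanish from the result, but `COUNT(column)` ignores NULLs, so the left join yields those languages with a count of 0 and the property cache gets filled for every language. A cut-down sketch of that behaviour (illustrative tables):

    from storm.expr import Column, Count, LeftJoin, Select
    from storm.locals import create_database, Store

    store = Store(create_database("sqlite:"))
    store.execute("CREATE TABLE language (id INTEGER)")
    store.execute("CREATE TABLE personlanguage (language INTEGER)")
    store.execute("INSERT INTO language VALUES (1)")
    store.execute("INSERT INTO language VALUES (2)")
    store.execute("INSERT INTO personlanguage VALUES (1)")

    lang_id = Column("id", "language")
    pl_lang = Column("language", "personlanguage")
    expr = Select((lang_id, Count(pl_lang)),
                  tables=LeftJoin("language", "personlanguage",
                                  pl_lang == lang_id),
                  group_by=lang_id)
    # Language 2 has no translators but still shows up, with count 0.
    print(sorted(store.execute(expr).get_all()))  # [(1, 1), (2, 0)]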
Example #13
def extractTrendingHashtags(store, limit=10, duration=None):
    """Extract information about trending hashtags and store it in FluidDB.

    @param store: The storm store to query and to save our result to.
    @param limit: Optionally, the number of objects to retrieve.
    @param duration: Optionally, the recent time period to look at when
        determining which hashtags are trending.  Default is 28 days.

    The storm query below results in SQL like:

        SELECT COUNT(DISTINCT comments.object_id) AS count,
               about_tag_values.value,
               array_agg(ROW(comments.username, comments.creation_time))
        FROM about_tag_values, comment_object_link, comments
        WHERE about_tag_values.value LIKE '#%' AND
              about_tag_values.object_id = comment_object_link.object_id AND
              comments.object_id = comment_object_link.comment_id AND
              comments.creation_time >= '2012-11-09 07:42:40'::TIMESTAMP AND
              CHAR_LENGTH(about_tag_values.value) >= 2
        GROUP BY about_tag_values.value
        ORDER BY count DESC
        LIMIT 10
    """
    duration = timedelta(days=28) if duration is None else duration
    startTime = datetime.utcnow() - duration
    count = Alias(Count(Comment.objectID, distinct=True))
    result = store.find(
        (count, AboutTagValue.value,
         Func('array_agg', Row(Comment.username, Comment.creationTime))),
        Like(AboutTagValue.value,
             u'#%'), AboutTagValue.objectID == CommentObjectLink.objectID,
        Comment.objectID == CommentObjectLink.commentID,
        Comment.creationTime >= startTime,
        Func('CHAR_LENGTH', AboutTagValue.value) >= 2)
    result.group_by(AboutTagValue.value)
    result.order_by(Desc(count))
    result.config(limit=limit)

    data = [{
        'count': count,
        'usernames': _sortUsernames(usernames),
        'value': hashtag
    } for count, hashtag, usernames in result]

    user = getUser(u'fluidinfo.com')
    tagValues = TagValueAPI(user)
    objectID = ObjectAPI(user).create(u'fluidinfo.com')
    tagValues.set(
        {objectID: {
            u'fluidinfo.com/trending-hashtags': json.dumps(data)
        }})
    store.commit()
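Aside from the PostgreSQL-specific `array_agg`/`Row` pieces, the aggregate worth noting here is `Count(..., distinct=True)`, which compiles to `COUNT(DISTINCT ...)`. A tiny self-contained sketch of the difference:

    from storm.expr import Column, Count, Select
    from storm.locals import create_database, Store

    store = Store(create_database("sqlite:"))
    store.execute("CREATE TABLE comments (object_id INTEGER)")
    for v in (1, 1, 2):
        store.execute("INSERT INTO comments VALUES (?)", (v,))

    oid = Column("object_id", "comments")
    print(store.execute(Select(Count(oid))).get_one())                  # (3,)
    print(store.execute(Select(Count(oid, distinct=True))).get_one())  # (2,)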
Example #14
 def _create_info_query(self, event_query_ta, meta_ta):
     return Select(
         columns=[
             Alias(None, 'entity_id'),
             Alias(None, 'TIMESTAMP'),
             Count().as_('active_entity_count'),
             meta_ta.owner_id.as_('owner_id'),
         ],
         tables=LeftJoin(
             event_query_ta,
             meta_ta,
             on=(meta_ta.entity_id == event_query_ta.entity_id)),
         group_by=[meta_ta.owner_id])
Example #15
def get_builder_data():
    """How many working builders are there, how are they configured?"""
    # XXX: This is broken with multi-Processor buildds, as it only
    # considers competition from the same processor.
    per_arch_totals = list(
        IStore(Builder).find(
            (BuilderProcessor.processor_id, Builder.virtualized,
             Count(Builder.id)), BuilderProcessor.builder_id == Builder.id,
            Builder._builderok == True,
            Builder.manual == False).group_by(BuilderProcessor.processor_id,
                                              Builder.virtualized))
    per_virt_totals = list(
        IStore(Builder).find(
            (Builder.virtualized, Count(Builder.id)),
            Builder._builderok == True,
            Builder.manual == False).group_by(Builder.virtualized))

    builder_stats = defaultdict(int)
    for virtualized, count in per_virt_totals:
        builder_stats[(None, virtualized)] = count
    for processor, virtualized, count in per_arch_totals:
        builder_stats[(processor, virtualized)] = count
    return builder_stats
Example #16
    def test_find_group_by(self):
        """
        DeferredReference.group_by is a simple wrapper to the group_by method
        of the reference set.
        """
        def cb_find(results):
            results.group_by(Bar.foo_id)
            return results.all().addCallback(check)

        def check(result):
            self.assertEquals(result, [(2, 10)])

        return self.store.find((Count(Bar.id), Bar.foo_id)
            ).addCallback(cb_find)
Example #17
    def findPublishedSourcePackageNames(self, distroseries, pocket):
        """Find currently published source packages.

        Returns an iterable of tuples: (name of source package, number of
        publications in Published state).
        """
        looking_for = (
            SourcePackageName.name,
            Count(SourcePackagePublishingHistory.id),
        )
        result = IStore(SourcePackageName).find(
            looking_for, join_spph_spr(), join_spph_spn(),
            self._composeActiveSourcePubsCondition(distroseries, pocket))
        return result.group_by(SourcePackageName.name)
Example #18
 def numSubmissionsWithDevice(
     self, bus=None, vendor_id=None, product_id=None, driver_name=None,
     package_name=None, distro_target=None):
     """See `IHWSubmissionSet`."""
     store = IStore(HWSubmission)
     submissions_with_device_select, all_submissions_select = (
         self._submissionsSubmitterSelects(
             Count(), bus, vendor_id, product_id, driver_name,
             package_name, distro_target))
     submissions_with_device = store.execute(
         submissions_with_device_select)
     all_submissions = store.execute(all_submissions_select)
     return (submissions_with_device.get_one()[0],
             all_submissions.get_one()[0])
Example #19
    def total_racers(self):
        if hasattr(self, '_total_racers') and self._total_racers is not None:
            return self._total_racers

        store = Store.of(self)
        query = And(Racer.id == RacerLap.racer_id,
                    Racer.category_id == self.id)
        data = store.using(RacerLap, Racer).find((Count(1)), query)
        data = list(data.group_by(Racer.category_id, Racer.id))

        complete_racers = len([i for i in data if i == self.total_laps])
        total_racers = Store.of(self).find(Racer,
                                           Racer.category == self).count()

        self._total_racers = '%s / %s' % (complete_racers, total_racers)
        return self._total_racers
Example #20
    def getBuildQueueSizes(self):
        """See `IBuilderSet`."""
        results = ISlaveStore(BuildQueue).find(
            (Count(), Sum(BuildQueue.estimated_duration), Processor,
             Coalesce(BuildQueue.virtualized, True)),
            Processor.id == BuildQueue.processorID,
            BuildQueue.status == BuildQueueStatus.WAITING).group_by(
                Processor, Coalesce(BuildQueue.virtualized, True))

        result_dict = {'virt': {}, 'nonvirt': {}}
        for size, duration, processor, virtualized in results:
            if virtualized is False:
                virt_str = 'nonvirt'
            else:
                virt_str = 'virt'
            result_dict[virt_str][processor.name] = (size, duration)

        return result_dict
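Note that the `Coalesce(BuildQueue.virtualized, True)` expression appears both in the selected columns and in the `group_by`; the two must match for the query to be accepted. A reduced sketch of grouping on a COALESCE'd column (illustrative table):

    from storm.expr import Coalesce, Column, Count, Select
    from storm.locals import create_database, Store

    store = Store(create_database("sqlite:"))
    store.execute("CREATE TABLE queue (virtualized BOOLEAN)")
    for v in (None, 1, 0):
        store.execute("INSERT INTO queue VALUES (?)", (v,))

    # NULL counts as virtualized=True, mirroring the query above.
    virt = Coalesce(Column("virtualized", "queue"), True)
    expr = Select((virt, Count()), group_by=virt)
    print(sorted(store.execute(expr).get_all()))  # [(0, 1), (1, 2)]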
Example #21
    def _validate_product_unique_name(self, name, category):
        """
        Validates product name uniqueness.
        """

        unique_name = generate_product_unique_name(name, category)

        store = get_current_transaction_store()
        result = store.execute(
            Select(columns=[Count(1)],
                   where=And(ProductsEntity.product_unique_name ==
                             unicode(unique_name)),
                   tables=[ProductsEntity])).get_one()

        if result is None:
            return

        result, = result
        if result > 0:
            raise ProductsException("Please select another name for product.")
Example #22
    def numDevicesInSubmissions(
        self, bus=None, vendor_id=None, product_id=None, driver_name=None,
        package_name=None, distro_target=None):
        """See `IHWSubmissionDeviceSet`."""
        tables, where_clauses = make_submission_device_statistics_clause(
            bus, vendor_id, product_id, driver_name, package_name, False)

        distro_tables, distro_clauses = make_distro_target_clause(
            distro_target)
        if distro_clauses:
            tables.extend(distro_tables)
            where_clauses.extend(distro_clauses)
            where_clauses.append(
                HWSubmissionDevice.submission == HWSubmission.id)

        result = IStore(HWSubmissionDevice).execute(
            Select(
                columns=[Count()], tables=tables, where=And(*where_clauses)))
        return result.get_one()[0]
Example #23
    def test_find_having(self):
        """
        DeferredReference.having is a simple wrapper to the having method of
        the reference set.
        """
        connection = self.database.connect()
        connection.execute("INSERT INTO egg VALUES (5, 7)")
        connection.commit()

        def cb_find(results):
            results.group_by(Egg.value)
            results.having(Egg.value >= 5)
            results.order_by(Egg.value)
            return results.all().addCallback(check)

        def check(result):
            self.assertEquals(result, [(1, 5), (2, 7)])

        return self.store.find((Count(Egg.id), Egg.value)
            ).addCallback(cb_find)
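`group_by`, `having` and `order_by` all mutate and return the same `ResultSet`, so they can be applied step by step as above. The same flow in plain (non-deferred) Storm, with a minimal model class standing in for `Egg`:

    from storm.expr import Count
    from storm.locals import Int, Store, Storm, create_database

    class Egg(Storm):
        __storm_table__ = "egg"
        id = Int(primary=True)
        value = Int()

    store = Store(create_database("sqlite:"))
    store.execute("CREATE TABLE egg (id INTEGER PRIMARY KEY, value INTEGER)")
    for id_, value in [(1, 5), (2, 7), (3, 7)]:
        store.execute("INSERT INTO egg VALUES (?, ?)", (id_, value))

    result = store.find((Count(Egg.id), Egg.value))
    result.group_by(Egg.value)
    result.having(Egg.value >= 5)
    result.order_by(Egg.value)
    print(list(result))  # [(1, 5), (2, 7)]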
Example #24
    def _update_summary(self, results):
        total_quantity = total = 0
        for obj in results:
            total_quantity += obj.quantity
            total += obj.total

        queries, having = self.search.parse_states()
        sale_results = self.store.using(*self.search_spec.tables)
        sale_results = sale_results.find(Count(Sale.id, distinct=True))
        if queries:
            sale_results = sale_results.find(And(*queries))

        sales = sale_results.one()
        items_per_sale = total_quantity / sales if sales > 0 else 0

        self.items_label.set_label(_(u'Sales: %s') % format_quantity(sales))
        self.quantity_label.set_label(
            _(u'Quantity: %s') % format_quantity(total_quantity))
        self.items_per_sale_label.set_label(
            _(u'Items per sale: %s') % format_quantity(items_per_sale))
        self.total_label.set_label(
            _(u'Total: %s') % get_formatted_price(total))
Example #25
def calculate_bugsummary_rows(target):
    """Calculate BugSummary row fragments for the given `IBugTarget`.

    The data is re-aggregated from BugTaskFlat, BugTag and BugSubscription.
    """
    # Use a CTE to prepare a subset of BugTaskFlat, filtered to the
    # relevant target and to exclude duplicates, and with has_patch
    # calculated.
    relevant_tasks = With(
        'relevant_task',
        Select((BugTaskFlat.bug_id, BugTaskFlat.information_type,
                BugTaskFlat.status, BugTaskFlat.milestone_id,
                BugTaskFlat.importance,
                Alias(BugTaskFlat.latest_patch_uploaded != None, 'has_patch'),
                BugTaskFlat.access_grants, BugTaskFlat.access_policies),
               tables=[BugTaskFlat],
               where=And(BugTaskFlat.duplicateof_id == None,
                         *get_bugtaskflat_constraint(target))))

    # Storm class to reference the CTE.
    class RelevantTask(BugTaskFlat):
        __storm_table__ = 'relevant_task'

        has_patch = Bool()

    # Storm class to reference the union.
    class BugSummaryPrototype(RawBugSummary):
        __storm_table__ = 'bugsummary_prototype'

    # Prepare a union for all combination of privacy and taggedness.
    # It'll return a full set of
    # (status, milestone, importance, has_patch, tag, viewed_by, access_policy)
    # rows.
    common_cols = (RelevantTask.status, RelevantTask.milestone_id,
                   RelevantTask.importance, RelevantTask.has_patch)
    null_tag = Alias(Cast(None, 'text'), 'tag')
    null_viewed_by = Alias(Cast(None, 'integer'), 'viewed_by')
    null_policy = Alias(Cast(None, 'integer'), 'access_policy')

    tag_join = Join(BugTag, BugTag.bugID == RelevantTask.bug_id)

    public_constraint = RelevantTask.information_type.is_in(
        PUBLIC_INFORMATION_TYPES)
    private_constraint = RelevantTask.information_type.is_in(
        PRIVATE_INFORMATION_TYPES)

    unions = Union(
        # Public, tagless
        Select(common_cols + (null_tag, null_viewed_by, null_policy),
               tables=[RelevantTask],
               where=public_constraint),
        # Public, tagged
        Select(common_cols + (BugTag.tag, null_viewed_by, null_policy),
               tables=[RelevantTask, tag_join],
               where=public_constraint),
        # Private, access grant, tagless
        Select(common_cols +
               (null_tag, Unnest(RelevantTask.access_grants), null_policy),
               tables=[RelevantTask],
               where=private_constraint),
        # Private, access grant, tagged
        Select(common_cols +
               (BugTag.tag, Unnest(RelevantTask.access_grants), null_policy),
               tables=[RelevantTask, tag_join],
               where=private_constraint),
        # Private, access policy, tagless
        Select(
            common_cols +
            (null_tag, null_viewed_by, Unnest(RelevantTask.access_policies)),
            tables=[RelevantTask],
            where=private_constraint),
        # Private, access policy, tagged
        Select(
            common_cols +
            (BugTag.tag, null_viewed_by, Unnest(RelevantTask.access_policies)),
            tables=[RelevantTask, tag_join],
            where=private_constraint),
        all=True)

    # Select the relevant bits of the prototype rows and aggregate them.
    proto_key_cols = (BugSummaryPrototype.status,
                      BugSummaryPrototype.milestone_id,
                      BugSummaryPrototype.importance,
                      BugSummaryPrototype.has_patch, BugSummaryPrototype.tag,
                      BugSummaryPrototype.viewed_by_id,
                      BugSummaryPrototype.access_policy_id)
    origin = IStore(BugTaskFlat).with_(relevant_tasks).using(
        Alias(unions, 'bugsummary_prototype'))
    results = origin.find(proto_key_cols + (Count(), ))
    results = results.group_by(*proto_key_cols).order_by(*proto_key_cols)
    return results
Example #26
 def post_search_callback(cls, sresults):
     select = sresults.get_select_expr(Count(1), Sum(cls.total))
     return ('count', 'sum'), select
Example #27
def get_receivertip_list(store, receiver_id, language):
    rtip_summary_list = []

    rtips = store.find(models.ReceiverTip, receiver_id=receiver_id)
    itips_ids = [rtip.internaltip_id for rtip in rtips]

    itips_by_id = {}
    aqs_by_itip = {}
    comments_by_itip = {}
    internalfiles_by_itip = {}
    messages_by_rtip = {}

    for itip, archivedschema in store.find(
            (models.InternalTip, models.ArchivedSchema),
            In(models.InternalTip.id, itips_ids),
            models.ArchivedSchema.hash == models.InternalTip.questionnaire_hash,
            models.ArchivedSchema.type == u'preview'):
        itips_by_id[itip.id] = itip
        aqs_by_itip[itip.id] = archivedschema

    result = store.find(
        (models.ReceiverTip.id, Count()),
        models.ReceiverTip.receiver_id == receiver_id,
        models.ReceiverTip.id == models.Message.receivertip_id).group_by(
            models.ReceiverTip)
    for rtip_id, count in result:
        messages_by_rtip[rtip_id] = count

    result = store.find(
        (models.InternalTip.id, Count()), In(models.InternalTip.id, itips_ids),
        models.InternalTip.id == models.Comment.internaltip_id).group_by(
            models.InternalTip)
    for itip_id, count in result:
        comments_by_itip[itip_id] = count

    result = store.find(
        (models.InternalTip.id, Count()), In(models.InternalTip.id, itips_ids),
        models.InternalTip.id == models.InternalFile.internaltip_id).group_by(
            models.InternalTip)
    for itip_id, count in result:
        internalfiles_by_itip[itip_id] = count

    for rtip in rtips:
        internaltip = itips_by_id[rtip.internaltip_id]
        archivedschema = aqs_by_itip[rtip.internaltip_id]

        rtip_summary_list.append({
            'id': rtip.id,
            'creation_date': datetime_to_ISO8601(internaltip.creation_date),
            'last_access': datetime_to_ISO8601(rtip.last_access),
            'update_date': datetime_to_ISO8601(internaltip.update_date),
            'expiration_date': datetime_to_ISO8601(internaltip.expiration_date),
            'progressive': internaltip.progressive,
            'new': (rtip.access_counter == 0
                    or rtip.last_access < internaltip.update_date),
            'context_id': internaltip.context_id,
            'access_counter': rtip.access_counter,
            'file_counter': internalfiles_by_itip.get(internaltip.id, 0),
            'comment_counter': comments_by_itip.get(internaltip.id, 0),
            'message_counter': messages_by_rtip.get(rtip.id, 0),
            'tor2web': internaltip.tor2web,
            'preview_schema': db_serialize_archived_preview_schema(
                store, archivedschema, language),
            'preview': internaltip.preview,
            'total_score': internaltip.total_score,
            'label': rtip.label,
        })

    return rtip_summary_list
Example #28
 def post_search_callback(cls, sresults):
     select = sresults.get_select_expr(
         Count(Distinct(Sellable.id)), Sum(Field('_stock_summary',
                                                 'stock')))
     return ('count', 'sum'), select
Example #29

class WorkOrderFinishedView(WorkOrderView):
    """A view for finished |workorders|

    This is the same as :class:`.WorkOrderView`, but only finished
    orders are shown here.
    """

    clause = WorkOrder.status == WorkOrder.STATUS_WORK_FINISHED


_WorkOrderPackageItemsSummary = Alias(
    Select(columns=[
        WorkOrderPackageItem.package_id,
        Alias(Count(WorkOrderPackageItem.id), 'quantity')
    ],
           tables=[WorkOrderPackageItem],
           group_by=[WorkOrderPackageItem.package_id]), '_package_items')


class WorkOrderPackageView(Viewable):
    """A view for |workorderpackages|

    This is used to get the most information of a |workorderpackage|
    without doing lots of database queries.
    """

    _BranchSource = ClassAlias(Branch, "branch_source")
    _BranchDestination = ClassAlias(Branch, "branch_destination")
    _PersonSource = ClassAlias(Person, "person_source")
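`_WorkOrderPackageItemsSummary` shows a third role for `Count`: the grouped `Select` is wrapped in an `Alias` and used as a derived table, which the view then joins like any ordinary table (the same shape as the `Alias(..., 'addresses')` subselects in Example #10 above). A standalone sketch of that shape (names are illustrative):

    from storm.expr import Alias, Column, Count, Select
    from storm.locals import create_database, Store

    store = Store(create_database("sqlite:"))
    store.execute("CREATE TABLE package_item (package_id INTEGER)")
    for pid in (1, 1, 2):
        store.execute("INSERT INTO package_item VALUES (?)", (pid,))

    package_id = Column("package_id", "package_item")
    summary = Alias(
        Select(columns=[package_id, Alias(Count(package_id), "quantity")],
               tables=["package_item"],
               group_by=[package_id]),
        "_summary")
    # SELECT package_id, quantity FROM (...) AS _summary
    result = store.execute(Select(
        (Column("package_id", "_summary"), Column("quantity", "_summary")),
        tables=[summary]))
    print(sorted(result.get_all()))  # [(1, 2), (2, 1)]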
Example #30
from stoqlib.domain.payment.operation import get_payment_operation
from stoqlib.domain.payment.payment import Payment, PaymentChangeHistory
from stoqlib.domain.payment.renegotiation import PaymentRenegotiation
from stoqlib.domain.person import Person
from stoqlib.domain.purchase import PurchaseOrder
from stoqlib.domain.sale import Sale
from stoqlib.lib.dateutils import localtoday
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.translation import stoqlib_gettext


_ = stoqlib_gettext


_CommentsSummary = Select(columns=[PaymentComment.payment_id,
                                   Alias(Count(PaymentComment.id), 'comments_number')],
                          tables=[PaymentComment],
                          group_by=[PaymentComment.payment_id])
CommentsSummary = Alias(_CommentsSummary, '_comments')


class BasePaymentView(Viewable):
    PaymentGroup_Sale = ClassAlias(PaymentGroup, 'payment_group_sale')
    PaymentGroup_Purchase = ClassAlias(PaymentGroup, 'payment_group_purchase')

    payment = Payment
    group = PaymentGroup
    purchase = PurchaseOrder
    sale = Sale
    method = PaymentMethod