Example 1
def threads(namespace_id, subject, from_addr, to_addr, cc_addr, bcc_addr,
            any_email, thread_public_id, started_before, started_after,
            last_message_before, last_message_after, filename, in_, unread,
            starred, limit, offset, view, db_session):

    if view == 'count':
        query = db_session.query(func.count(distinct(Thread.id)))
    elif view == 'ids':
        query = db_session.query(Thread.public_id)
    else:
        query = db_session.query(Thread)

    filters = _threads_filters(namespace_id, thread_public_id, started_before,
                              started_after, last_message_before,
                              last_message_after, subject)

    query = _threads_join_category(query, namespace_id, in_)
    query = query.filter(*filters)
    for subquery in _threads_subqueries(namespace_id, from_addr, to_addr,
                                       cc_addr, bcc_addr, any_email, filename,
                                       unread, starred, db_session):
        query = query.filter(Thread.id.in_(subquery))

    if view == 'count':
        return {"count": query.one()[0]}

    # Eager-load some objects in order to make constructing API
    # representations faster.
    if view != 'ids':
        expand = (view == 'expanded')
        query = query.options(*Thread.api_loading_options(expand))

    query = query.order_by(desc(Thread.recentdate)).limit(limit)

    if offset:
        query = query.offset(offset)

    if view == 'ids':
        return [x[0] for x in query.all()]

    return query.all()
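Example 1 factors the filter construction out into helper functions (_threads_filters, _threads_join_category, _threads_subqueries) that are not reproduced on this page. As a rough guide, a hypothetical sketch of _threads_filters is shown below; it assumes the same Thread columns used by the inline versions in Examples 6 and 7, and is not the project's actual implementation.

def _threads_filters(namespace_id, thread_public_id, started_before,
                     started_after, last_message_before, last_message_after,
                     subject):
    # Always scope results to the caller's namespace.
    filters = [Thread.namespace_id == namespace_id]
    if thread_public_id is not None:
        filters.append(Thread.public_id == thread_public_id)
    if started_before is not None:
        filters.append(Thread.subjectdate < started_before)
    if started_after is not None:
        filters.append(Thread.subjectdate > started_after)
    if last_message_before is not None:
        filters.append(Thread.recentdate < last_message_before)
    if last_message_after is not None:
        filters.append(Thread.recentdate > last_message_after)
    if subject is not None:
        filters.append(Thread.subject == subject)
    return filters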
Example 2
def format_transactions_after_pointer(namespace,
                                      pointer,
                                      db_session,
                                      result_limit,
                                      exclude_types=None,
                                      include_types=None,
                                      exclude_folders=True,
                                      expand=False):
    """
    Return a pair (deltas, new_pointer), where deltas is a list of change
    events, represented as dictionaries:
    {
      "object": <API object type, e.g. "thread">,
      "event": <"create", "modify", or "delete>,
      "attributes": <API representation of the object for insert/update events>
      "cursor": <public_id of the transaction>
    }

    and new_pointer is the integer id of the last included transaction

    Arguments
    ---------
    namespace: Namespace
        Namespace for which to get changes.
    pointer: int
        Process transactions starting after this id.
    db_session:
        Database session.
    result_limit: int
        Maximum number of results to return. (Because we may roll up multiple
        changes to the same object, fewer results can be returned.)
    exclude_types: list, optional
        If given, don't include transactions for these types of objects.
    include_types: list, optional
        If given, only include transactions for these types of objects.

    """
    exclude_types = set(exclude_types) if exclude_types else set()
    # Begin backwards-compatibility shim -- suppress new object types for now,
    # because clients may not be able to deal with them.
    exclude_types.add('account')
    if exclude_folders is True:
        exclude_types.update(('folder', 'label'))
    # End backwards-compatibility shim.

    last_trx = _get_last_trx_id_for_namespace(namespace.id, db_session)
    if last_trx == pointer:
        return ([], pointer)

    while True:
        # deleted_at condition included to allow this query to be satisfied via
        # the legacy index on (namespace_id, deleted_at) for performance.
        # Also need to explicitly specify the index hint because the query
        # planner is dumb as nails and otherwise would make this super slow for
        # some values of namespace_id and pointer.
        # TODO(emfree): Remove this hack and ensure that the right index (on
        # namespace_id only) exists.
        transactions = db_session.query(Transaction). \
            filter(
                Transaction.id > pointer,
                Transaction.namespace_id == namespace.id,
                Transaction.deleted_at.is_(None)). \
            with_hint(Transaction, 'USE INDEX (namespace_id_deleted_at)')

        if exclude_types is not None:
            transactions = transactions.filter(
                ~Transaction.object_type.in_(exclude_types))

        if include_types is not None:
            transactions = transactions.filter(
                Transaction.object_type.in_(include_types))

        transactions = transactions. \
            order_by(asc(Transaction.id)).limit(result_limit).all()

        if not transactions:
            return ([], pointer)

        results = []

        # Group deltas by object type.
        trxs_by_obj_type = collections.defaultdict(list)
        for trx in transactions:
            trxs_by_obj_type[trx.object_type].append(trx)

        for obj_type, trxs in trxs_by_obj_type.items():
            # Build a dictionary mapping pairs (record_id, command) to
            # transaction. If successive modifies for a given record id appear
            # in the list of transactions, this will only keep the latest
            # one (which is what we want).
            latest_trxs = {(trx.record_id, trx.command): trx
                           for trx in sorted(trxs, key=lambda t: t.id)
                           }.values()
            # Load all referenced not-deleted objects.
            ids_to_query = [
                trx.record_id for trx in latest_trxs if trx.command != 'delete'
            ]

            object_cls = transaction_objects()[obj_type]
            query = db_session.query(object_cls).filter(
                object_cls.id.in_(ids_to_query),
                object_cls.namespace_id == namespace.id)
            if object_cls == Thread:
                query = query.options(*Thread.api_loading_options(expand))
            elif object_cls == Message:
                query = query.options(*Message.api_loading_options(expand))
            objects = {obj.id: obj for obj in query}

            for trx in latest_trxs:
                delta = {
                    'object': trx.object_type,
                    'event': EVENT_NAME_FOR_COMMAND[trx.command],
                    'id': trx.object_public_id,
                    'cursor': trx.public_id
                }
                if trx.command != 'delete':
                    obj = objects.get(trx.record_id)
                    if obj is None:
                        continue
                    repr_ = encode(obj,
                                   namespace_public_id=namespace.public_id,
                                   expand=expand)
                    delta['attributes'] = repr_

                results.append((trx.id, delta))

        if results:
            # Sort deltas by id of the underlying transactions.
            results.sort()
            deltas = [d for _, d in results]
            return (deltas, results[-1][0])
        else:
            # It's possible that none of the referenced objects exist any more,
            # meaning the result list is empty. In that case, keep traversing
            # the log until we get actual results or reach the end.
            pointer = transactions[-1].id
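The delta dictionaries built above rely on EVENT_NAME_FOR_COMMAND, which is defined elsewhere in the codebase. Based on the docstring (insert and update transactions are surfaced as "create" and "modify" events), a plausible definition and a minimal consumer loop might look like the following sketch; the drain_deltas helper name and its arguments are hypothetical.

# Hypothetical mapping implied by the docstring: transaction commands are
# surfaced to API clients as create/modify/delete events.
EVENT_NAME_FOR_COMMAND = {
    'insert': 'create',
    'update': 'modify',
    'delete': 'delete',
}

def drain_deltas(namespace, db_session, pointer, batch_size=100):
    # Repeatedly page through the transaction log until no new deltas remain,
    # feeding the returned pointer back in as the next starting point.
    collected = []
    while True:
        deltas, pointer = format_transactions_after_pointer(
            namespace, pointer, db_session, batch_size)
        if not deltas:
            return collected, pointer
        collected.extend(deltas)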
Example 3
def threads(
    namespace_id,
    subject,
    from_addr,
    to_addr,
    cc_addr,
    bcc_addr,
    any_email,
    message_id_header,
    thread_public_id,
    started_before,
    started_after,
    last_message_before,
    last_message_after,
    filename,
    in_,
    unread,
    starred,
    limit,
    offset,
    view,
    db_session,
):

    if view == "count":
        query = db_session.query(func.count(Thread.id))
    elif view == "ids":
        query = db_session.query(Thread.public_id)
    else:
        query = db_session.query(Thread)

    filters = [
        Thread.namespace_id == namespace_id,
        Thread.deleted_at.is_(None),
    ]
    if thread_public_id is not None:
        filters.append(Thread.public_id == thread_public_id)

    if started_before is not None:
        filters.append(Thread.subjectdate < started_before)

    if started_after is not None:
        filters.append(Thread.subjectdate > started_after)

    if last_message_before is not None:
        filters.append(Thread.recentdate < last_message_before)

    if last_message_after is not None:
        filters.append(Thread.recentdate > last_message_after)

    if subject is not None:
        filters.append(Thread.subject == subject)

    query = query.filter(*filters)

    if from_addr is not None:
        from_query = contact_subquery(db_session, namespace_id, from_addr,
                                      "from_addr")
        query = query.filter(Thread.id.in_(from_query))

    if to_addr is not None:
        to_query = contact_subquery(db_session, namespace_id, to_addr,
                                    "to_addr")
        query = query.filter(Thread.id.in_(to_query))

    if cc_addr is not None:
        cc_query = contact_subquery(db_session, namespace_id, cc_addr,
                                    "cc_addr")
        query = query.filter(Thread.id.in_(cc_query))

    if bcc_addr is not None:
        bcc_query = contact_subquery(db_session, namespace_id, bcc_addr,
                                     "bcc_addr")
        query = query.filter(Thread.id.in_(bcc_query))

    if any_email is not None:
        any_contact_query = (db_session.query(
            Message.thread_id).join(MessageContactAssociation).join(
                Contact,
                MessageContactAssociation.contact_id == Contact.id).filter(
                    Contact.email_address.in_(any_email),
                    Contact.namespace_id == namespace_id,
                ).subquery())
        query = query.filter(Thread.id.in_(any_contact_query))

    if message_id_header is not None:
        message_id_query = db_session.query(Message.thread_id).filter(
            Message.message_id_header == message_id_header)
        query = query.filter(Thread.id.in_(message_id_query))

    if filename is not None:
        files_query = (db_session.query(
            Message.thread_id).join(Part).join(Block).filter(
                Block.filename == filename,
                Block.namespace_id == namespace_id).subquery())
        query = query.filter(Thread.id.in_(files_query))

    if in_ is not None:
        category_filters = [Category.name == in_, Category.display_name == in_]
        try:
            valid_public_id(in_)
            category_filters.append(Category.public_id == in_)
        except InputError:
            pass
        category_query = (db_session.query(
            Message.thread_id).prefix_with("STRAIGHT_JOIN").join(
                Message.messagecategories).join(
                    MessageCategory.category).filter(
                        Category.namespace_id == namespace_id,
                        or_(*category_filters)).subquery())
        query = query.filter(Thread.id.in_(category_query))

    if unread is not None:
        read = not unread
        unread_query = (db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_read == read).subquery())
        query = query.filter(Thread.id.in_(unread_query))

    if starred is not None:
        starred_query = (db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_starred == starred).subquery())
        query = query.filter(Thread.id.in_(starred_query))

    if view == "count":
        return {"count": query.one()[0]}

    # Eager-load some objects in order to make constructing API
    # representations faster.
    if view != "ids":
        expand = view == "expanded"
        query = query.options(*Thread.api_loading_options(expand))

    query = query.order_by(desc(Thread.recentdate)).limit(limit)

    if offset:
        query = query.offset(offset)

    if view == "ids":
        return [x[0] for x in query.all()]

    return query.all()
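This version replaces the inline from/to/cc/bcc subqueries with a contact_subquery helper that is not included here. A hypothetical sketch, mirroring the join structure of the inline subqueries in Examples 6 and 7:

def contact_subquery(db_session, namespace_id, email_address, field):
    # Select the thread ids of messages where the given address appears in
    # the given header field (from_addr, to_addr, cc_addr or bcc_addr).
    return (db_session.query(Message.thread_id)
            .join(MessageContactAssociation)
            .join(Contact, MessageContactAssociation.contact_id == Contact.id)
            .filter(Contact.email_address == email_address,
                    Contact.namespace_id == namespace_id,
                    MessageContactAssociation.field == field)
            .subquery())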
Example 4
def format_transactions_after_pointer(namespace, pointer, db_session,
                                      result_limit, exclude_types=None,
                                      include_types=None, exclude_folders=True,
                                      exclude_metadata=True, exclude_account=True,
                                      expand=False):
    """
    Return a pair (deltas, new_pointer), where deltas is a list of change
    events, represented as dictionaries:
    {
      "object": <API object type, e.g. "thread">,
      "event": <"create", "modify", or "delete>,
      "attributes": <API representation of the object for insert/update events>
      "cursor": <public_id of the transaction>
    }

    and new_pointer is the integer id of the last included transaction

    Arguments
    ---------
    namespace: Namespace
        Namespace for which to get changes.
    pointer: int
        Process transactions starting after this id.
    db_session:
        Database session.
    result_limit: int
        Maximum number of results to return. (Because we may roll up multiple
        changes to the same object, fewer results can be returned.)
    exclude_types: list, optional
        If given, don't include transactions for these types of objects.
    include_types: list, optional
        If given, only include transactions for these types of objects.

    """
    exclude_types = set(exclude_types) if exclude_types else set()
    # Begin backwards-compatibility shim -- suppress new object types for now,
    # because clients may not be able to deal with them.
    if exclude_folders is True:
        exclude_types.update(('folder', 'label'))
    if exclude_account is True:
        exclude_types.add('account')
    # End backwards-compatibility shim.

    # Metadata is excluded by default, and can only be included by setting the
    # exclude_metadata flag to False. If listed in include_types, remove it.
    if exclude_metadata is True:
        exclude_types.add('metadata')
    if include_types is not None and 'metadata' in include_types:
        include_types.remove('metadata')

    last_trx = _get_last_trx_id_for_namespace(namespace.id, db_session)
    if last_trx == pointer:
        return ([], pointer)

    while True:
        # deleted_at condition included to allow this query to be satisfied via
        # the legacy index on (namespace_id, deleted_at) for performance.
        # Also need to explicitly specify the index hint because the query
        # planner is dumb as nails and otherwise would make this super slow for
        # some values of namespace_id and pointer.
        # TODO(emfree): Remove this hack and ensure that the right index (on
        # namespace_id only) exists.
        transactions = db_session.query(Transaction). \
            filter(
                Transaction.id > pointer,
                Transaction.namespace_id == namespace.id,
                Transaction.deleted_at.is_(None)). \
            with_hint(Transaction, 'USE INDEX (namespace_id_deleted_at)')

        if exclude_types is not None:
            transactions = transactions.filter(
                ~Transaction.object_type.in_(exclude_types))

        if include_types is not None:
            transactions = transactions.filter(
                Transaction.object_type.in_(include_types))

        transactions = transactions. \
            order_by(asc(Transaction.id)).limit(result_limit).all()

        if not transactions:
            return ([], pointer)

        results = []

        # Group deltas by object type.
        trxs_by_obj_type = collections.defaultdict(list)
        for trx in transactions:
            trxs_by_obj_type[trx.object_type].append(trx)

        for obj_type, trxs in trxs_by_obj_type.items():
            # Build a dictionary mapping pairs (record_id, command) to
            # transaction. If successive modifies for a given record id appear
            # in the list of transactions, this will only keep the latest
            # one (which is what we want).
            latest_trxs = {(trx.record_id, trx.command): trx for trx in
                           sorted(trxs, key=lambda t: t.id)}.values()
            # Load all referenced not-deleted objects.
            ids_to_query = [trx.record_id for trx in latest_trxs
                            if trx.command != 'delete']

            object_cls = transaction_objects()[obj_type]

            if object_cls == Account:
                # The base query for Account queries the /Namespace/ table
                # since the API-returned "`account`" is a `namespace`
                # under-the-hood.
                query = db_session.query(Namespace).join(Account).filter(
                    Account.id.in_(ids_to_query),
                    Namespace.id == namespace.id)

                # Key by /namespace.account_id/ --
                # namespace.id may not be equal to account.id
                # and trx.record_id == account.id for `account` trxs.
                objects = {obj.account_id: obj for obj in query}
            else:
                query = db_session.query(object_cls).filter(
                    object_cls.id.in_(ids_to_query),
                    object_cls.namespace_id == namespace.id)

                if object_cls == Thread:
                    query = query.options(*Thread.api_loading_options(expand))
                elif object_cls == Message:
                    query = query.options(*Message.api_loading_options(expand))

                objects = {obj.id: obj for obj in query}

            for trx in latest_trxs:
                delta = {
                    'object': trx.object_type,
                    'event': EVENT_NAME_FOR_COMMAND[trx.command],
                    'id': trx.object_public_id,
                    'cursor': trx.public_id
                }
                if trx.command != 'delete':
                    obj = objects.get(trx.record_id)
                    if obj is None:
                        continue
                    repr_ = encode(
                        obj, namespace_public_id=namespace.public_id,
                        expand=expand)
                    delta['attributes'] = repr_

                results.append((trx.id, delta))

        if results:
            # Sort deltas by id of the underlying transactions.
            results.sort()
            deltas = [d for _, d in results]
            return (deltas, results[-1][0])
        else:
            # It's possible that none of the referenced objects exist any more,
            # meaning the result list is empty. In that case, keep traversing
            # the log until we get actual results or reach the end.
            pointer = transactions[-1].id
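Both format_transactions_after_pointer variants resolve each transaction's object_type to a model class through transaction_objects(), which is not shown. Only the Thread, Message and Account cases are visible in the code above, so the sketch below is an assumption about its general shape rather than the real registry.

def transaction_objects():
    # Hypothetical mapping from API object-type names to model classes;
    # the real function registers every syncable type.
    return {
        'account': Account,
        'message': Message,
        'thread': Thread,
    }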
Example 5
def format_transactions_after_pointer(namespace,
                                      pointer,
                                      db_session,
                                      result_limit,
                                      exclude_types=None,
                                      include_types=None,
                                      exclude_folders=True,
                                      exclude_metadata=True,
                                      exclude_account=True,
                                      expand=False,
                                      is_n1=False):
    """
    Return a pair (deltas, new_pointer), where deltas is a list of change
    events, represented as dictionaries:
    {
      "object": <API object type, e.g. "thread">,
      "event": <"create", "modify", or "delete>,
      "attributes": <API representation of the object for insert/update events>
      "cursor": <public_id of the transaction>
    }

    and new_pointer is the integer id of the last included transaction

    Arguments
    ---------
    namespace: Namespace
        Namespace for which to get changes.
    pointer: int
        Process transactions starting after this id.
    db_session:
        Database session.
    result_limit: int
        Maximum number of results to return. (Because we may roll up multiple
        changes to the same object, fewer results can be returned.)
    exclude_types: list, optional
        If given, don't include transactions for these types of objects.
    include_types: list, optional
        If given, only include transactions for these types of objects.

    """
    exclude_types = set(exclude_types) if exclude_types else set()
    # Begin backwards-compatibility shim -- suppress new object types for now,
    # because clients may not be able to deal with them.
    if exclude_folders is True:
        exclude_types.update(('folder', 'label'))
    if exclude_account is True:
        exclude_types.add('account')
    # End backwards-compatibility shim.

    # Metadata is excluded by default, and can only be included by setting the
    # exclude_metadata flag to False. If listed in include_types, remove it.
    if exclude_metadata is True:
        exclude_types.add('metadata')
    if include_types is not None and 'metadata' in include_types:
        include_types.remove('metadata')

    last_trx = _get_last_trx_id_for_namespace(namespace.id, db_session)
    if last_trx == pointer:
        return ([], pointer)

    while True:
        transactions = db_session.query(Transaction). \
            filter(
                Transaction.id > pointer,
                Transaction.namespace_id == namespace.id)

        if exclude_types is not None:
            transactions = transactions.filter(
                ~Transaction.object_type.in_(exclude_types))

        if include_types is not None:
            transactions = transactions.filter(
                Transaction.object_type.in_(include_types))

        transactions = transactions. \
            order_by(asc(Transaction.id)).limit(result_limit).all()

        if not transactions:
            return ([], pointer)

        results = []

        # Group deltas by object type.
        trxs_by_obj_type = collections.defaultdict(list)
        for trx in transactions:
            trxs_by_obj_type[trx.object_type].append(trx)

        for obj_type, trxs in trxs_by_obj_type.items():
            # Build a dictionary mapping pairs (record_id, command) to
            # transaction. If successive modifies for a given record id appear
            # in the list of transactions, this will only keep the latest
            # one (which is what we want).
            latest_trxs = {(trx.record_id, trx.command): trx
                           for trx in sorted(trxs, key=lambda t: t.id)
                           }.values()
            # Load all referenced not-deleted objects.
            ids_to_query = [
                trx.record_id for trx in latest_trxs if trx.command != 'delete'
            ]

            object_cls = transaction_objects()[obj_type]

            if object_cls == Account:
                # The base query for Account queries the /Namespace/ table
                # since the API-returned "`account`" is a `namespace`
                # under-the-hood.
                query = db_session.query(Namespace).join(Account).filter(
                    Account.id.in_(ids_to_query), Namespace.id == namespace.id)

                # Key by /namespace.account_id/ --
                # namespace.id may not be equal to account.id
                # and trx.record_id == account.id for `account` trxs.
                objects = {obj.account_id: obj for obj in query}
            else:
                query = db_session.query(object_cls).filter(
                    object_cls.id.in_(ids_to_query),
                    object_cls.namespace_id == namespace.id)

                if object_cls == Thread:
                    query = query.options(*Thread.api_loading_options(expand))
                elif object_cls == Message:
                    query = query.options(*Message.api_loading_options(expand))

                objects = {obj.id: obj for obj in query}

            for trx in latest_trxs:
                delta = {
                    'object': trx.object_type,
                    'event': EVENT_NAME_FOR_COMMAND[trx.command],
                    'id': trx.object_public_id,
                    'cursor': trx.public_id
                }
                if trx.command != 'delete':
                    obj = objects.get(trx.record_id)
                    if obj is None:
                        continue
                    repr_ = encode(obj,
                                   namespace_public_id=namespace.public_id,
                                   expand=expand,
                                   is_n1=is_n1)
                    delta['attributes'] = repr_

                results.append((trx.id, delta))

        if results:
            # Sort deltas by id of the underlying transactions.
            results.sort()
            deltas = [d for _, d in results]
            return (deltas, results[-1][0])
        else:
            # It's possible that none of the referenced objects exist any more,
            # meaning the result list is empty. In that case, keep traversing
            # the log until we get actual results or reach the end.
            pointer = transactions[-1].id
Example 6
def threads(namespace_id, subject, from_addr, to_addr, cc_addr, bcc_addr,
            any_email, thread_public_id, started_before, started_after,
            last_message_before, last_message_after, filename, in_, unread,
            starred, limit, offset, view, db_session):

    if view == 'count':
        query = db_session.query(func.count(Thread.id))
    elif view == 'ids':
        query = db_session.query(Thread.public_id)
    else:
        query = db_session.query(Thread)

    filters = [Thread.namespace_id == namespace_id]
    if thread_public_id is not None:
        filters.append(Thread.public_id == thread_public_id)

    if started_before is not None:
        filters.append(Thread.subjectdate < started_before)

    if started_after is not None:
        filters.append(Thread.subjectdate > started_after)

    if last_message_before is not None:
        filters.append(Thread.recentdate < last_message_before)

    if last_message_after is not None:
        filters.append(Thread.recentdate > last_message_after)

    if subject is not None:
        filters.append(Thread.subject == subject)

    query = query.filter(*filters)

    if from_addr is not None:
        from_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == from_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'from_addr').subquery()
        query = query.filter(Thread.id.in_(from_query))

    if to_addr is not None:
        to_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == to_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'to_addr').subquery()
        query = query.filter(Thread.id.in_(to_query))

    if cc_addr is not None:
        cc_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == cc_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'cc_addr').subquery()
        query = query.filter(Thread.id.in_(cc_query))

    if bcc_addr is not None:
        bcc_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == bcc_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'bcc_addr').subquery()
        query = query.filter(Thread.id.in_(bcc_query))

    if any_email is not None:
        any_contact_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact). \
            filter(Contact.email_address == any_email,
                   Contact.namespace_id == namespace_id).subquery()
        query = query.filter(Thread.id.in_(any_contact_query))

    if filename is not None:
        files_query = db_session.query(Message.thread_id). \
            join(Part).join(Block). \
            filter(Block.filename == filename,
                   Block.namespace_id == namespace_id). \
            subquery()
        query = query.filter(Thread.id.in_(files_query))

    if in_ is not None:
        category_filters = [Category.name == in_, Category.display_name == in_]
        try:
            valid_public_id(in_)
            category_filters.append(Category.public_id == in_)
        except InputError:
            pass
        category_query = db_session.query(Message.thread_id). \
            join(MessageCategory).join(Category). \
            filter(Category.namespace_id == namespace_id,
                   or_(*category_filters)).subquery()
        query = query.filter(Thread.id.in_(category_query))

    if unread is not None:
        read = not unread
        unread_query = db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_read == read).subquery()
        query = query.filter(Thread.id.in_(unread_query))

    if starred is not None:
        starred_query = db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_starred == starred).subquery()
        query = query.filter(Thread.id.in_(starred_query))

    if view == 'count':
        return {"count": query.one()[0]}

    # Eager-load some objects in order to make constructing API
    # representations faster.
    if view != 'ids':
        expand = (view == 'expanded')
        query = query.options(*Thread.api_loading_options(expand))

    query = query.order_by(desc(Thread.recentdate)).limit(limit)

    if offset:
        query = query.offset(offset)

    if view == 'ids':
        return [x[0] for x in query.all()]

    return query.all()
Example 7
def threads(namespace_id, subject, from_addr, to_addr, cc_addr, bcc_addr,
            any_email, thread_public_id, started_before, started_after,
            last_message_before, last_message_after, filename, in_, unread,
            starred, limit, offset, view, db_session):

    if view == 'count':
        query = db_session.query(func.count(Thread.id))
    elif view == 'ids':
        query = db_session.query(Thread.public_id)
    else:
        query = db_session.query(Thread)

    filters = [Thread.namespace_id == namespace_id]
    if thread_public_id is not None:
        filters.append(Thread.public_id == thread_public_id)

    if started_before is not None:
        filters.append(Thread.subjectdate < started_before)

    if started_after is not None:
        filters.append(Thread.subjectdate > started_after)

    if last_message_before is not None:
        filters.append(Thread.recentdate < last_message_before)

    if last_message_after is not None:
        filters.append(Thread.recentdate > last_message_after)

    if subject is not None:
        filters.append(Thread.subject == subject)

    query = query.filter(*filters)

    if from_addr is not None:
        from_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == from_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'from_addr').subquery()
        query = query.filter(Thread.id.in_(from_query))

    if to_addr is not None:
        to_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == to_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'to_addr').subquery()
        query = query.filter(Thread.id.in_(to_query))

    if cc_addr is not None:
        cc_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == cc_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'cc_addr').subquery()
        query = query.filter(Thread.id.in_(cc_query))

    if bcc_addr is not None:
        bcc_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == bcc_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'bcc_addr').subquery()
        query = query.filter(Thread.id.in_(bcc_query))

    if any_email is not None:
        any_contact_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact). \
            filter(Contact.email_address.in_(any_email),
                   Contact.namespace_id == namespace_id).subquery()
        query = query.filter(Thread.id.in_(any_contact_query))

    if filename is not None:
        files_query = db_session.query(Message.thread_id). \
            join(Part).join(Block). \
            filter(Block.filename == filename,
                   Block.namespace_id == namespace_id). \
            subquery()
        query = query.filter(Thread.id.in_(files_query))

    if in_ is not None:
        category_filters = [Category.name == in_, Category.display_name == in_]
        try:
            valid_public_id(in_)
            category_filters.append(Category.public_id == in_)
        except InputError:
            pass
        category_query = db_session.query(Message.thread_id). \
            join(MessageCategory).join(Category). \
            filter(Category.namespace_id == namespace_id,
                   or_(*category_filters)).subquery()
        query = query.filter(Thread.id.in_(category_query))

    if unread is not None:
        read = not unread
        unread_query = db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_read == read).subquery()
        query = query.filter(Thread.id.in_(unread_query))

    if starred is not None:
        starred_query = db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_starred == starred).subquery()
        query = query.filter(Thread.id.in_(starred_query))

    if view == 'count':
        return {"count": query.one()[0]}

    # Eager-load some objects in order to make constructing API
    # representations faster.
    if view != 'ids':
        expand = (view == 'expanded')
        query = query.options(*Thread.api_loading_options(expand))

    query = query.order_by(desc(Thread.recentdate)).limit(limit)

    if offset:
        query = query.offset(offset)

    if view == 'ids':
        return [x[0] for x in query.all()]

    return query.all()
Example 8
def threads(namespace_id,
            subject,
            from_addr,
            to_addr,
            cc_addr,
            bcc_addr,
            any_email,
            thread_public_id,
            started_before,
            started_after,
            last_message_before,
            last_message_after,
            filename,
            in_,
            unread,
            starred,
            limit,
            offset,
            view,
            db_session,
            sort_field='recentdate',
            sort_order='desc'):

    if view == 'count':
        query = db_session.query(func.count(Thread.id))
    elif view == 'ids':
        query = db_session.query(Thread.public_id)
    else:
        query = db_session.query(Thread)

    filters = [Thread.namespace_id == namespace_id,
               Thread.deleted_at.is_(None)]
    if thread_public_id is not None:
        filters.append(Thread.public_id == thread_public_id)

    if started_before is not None:
        filters.append(Thread.subjectdate < started_before)

    if started_after is not None:
        filters.append(Thread.subjectdate > started_after)

    if last_message_before is not None:
        filters.append(Thread.recentdate < last_message_before)

    if last_message_after is not None:
        filters.append(Thread.recentdate > last_message_after)

    if subject is not None:
        filters.append(Thread.subject == subject)

    query = query.filter(*filters)

    if from_addr is not None:
        from_query = contact_subquery(db_session, namespace_id, from_addr,
                                      'from_addr')
        query = query.filter(Thread.id.in_(from_query))

    if to_addr is not None:
        to_query = contact_subquery(db_session, namespace_id, to_addr,
                                    'to_addr')
        query = query.filter(Thread.id.in_(to_query))

    if cc_addr is not None:
        cc_query = contact_subquery(db_session, namespace_id, cc_addr,
                                    'cc_addr')
        query = query.filter(Thread.id.in_(cc_query))

    if bcc_addr is not None:
        bcc_query = contact_subquery(db_session, namespace_id, bcc_addr,
                                     'bcc_addr')
        query = query.filter(Thread.id.in_(bcc_query))

    if any_email is not None:
        any_contact_query = db_session.query(Message.thread_id) \
            .join(MessageContactAssociation) \
            .join(Contact, MessageContactAssociation.contact_id == Contact.id)\
            .filter(Contact.email_address.in_(any_email),
                    Contact.namespace_id == namespace_id)\
            .subquery()
        query = query.filter(Thread.id.in_(any_contact_query))

    if filename is not None:
        files_query = db_session.query(Message.thread_id). \
            join(Part).join(Block). \
            filter(Block.filename == filename,
                   Block.namespace_id == namespace_id). \
            subquery()
        query = query.filter(Thread.id.in_(files_query))

    if in_ is not None:
        category_filters = [Category.name == in_, Category.display_name == in_]
        try:
            valid_public_id(in_)
            category_filters.append(Category.public_id == in_)
        except InputError:
            pass
        category_query = db_session.query(Message.thread_id). \
            prefix_with('STRAIGHT_JOIN'). \
            join(Message.messagecategories).join(MessageCategory.category). \
            filter(Category.namespace_id == namespace_id,
                   or_(*category_filters)).subquery()
        query = query.filter(Thread.id.in_(category_query))

    if unread is not None:
        read = not unread
        unread_query = db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_read == read).subquery()
        query = query.filter(Thread.id.in_(unread_query))

    if starred is not None:
        starred_query = db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_starred == starred).subquery()
        query = query.filter(Thread.id.in_(starred_query))

    if view == 'count':
        return {"count": query.one()[0]}

    # Eager-load some objects in order to make constructing API
    # representations faster.
    if view != 'ids':
        expand = (view == 'expanded')
        query = query.options(*Thread.api_loading_options(expand))

    # Sort by field and direction.
    sort_field = sort_field.lower() if sort_field is not None else None
    sort_order = sort_order.lower() if sort_order is not None else None
    db_sort_field = Thread.recentdate
    db_sort_order = desc

    sort_field_mapping = {
        'subject': Thread.subject,
        'date': Thread.recentdate,
        'start_date': Thread.subjectdate
    }
    if sort_field in sort_field_mapping:
        db_sort_field = sort_field_mapping[sort_field]

    if sort_order == 'asc':
        db_sort_order = asc

    query = query.order_by(db_sort_order(db_sort_field)).limit(limit)

    if offset:
        query = query.offset(offset)

    if view == 'ids':
        return [x[0] for x in query.all()]

    return query.all()
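Example 8 extends the earlier signatures with sort_field and sort_order parameters. A minimal, hypothetical call site is sketched below; the session object and the namespace id are assumptions, and every unused filter is passed explicitly because the function defines no defaults for them.

# List 20 threads containing unread messages, ordered by start date ascending.
results = threads(
    namespace_id=1, subject=None, from_addr=None, to_addr=None, cc_addr=None,
    bcc_addr=None, any_email=None, thread_public_id=None, started_before=None,
    started_after=None, last_message_before=None, last_message_after=None,
    filename=None, in_=None, unread=True, starred=None, limit=20, offset=0,
    view=None, db_session=session, sort_field='start_date', sort_order='asc')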