Example #1
def test_namespace_limiting(db, api_client, default_namespaces):
    dt = datetime.datetime.utcnow()
    subject = dt.isoformat()
    namespaces = db.session.query(Namespace).all()
    assert len(namespaces) > 1
    for ns in namespaces:
        thread = Thread(namespace=ns,
                        subjectdate=dt,
                        recentdate=dt,
                        subject=subject)
        add_fake_message(db.session,
                         ns.id,
                         thread,
                         received_date=dt,
                         subject=subject)
        db.session.add(Block(namespace=ns, filename=subject))
    db.session.commit()

    for ns in namespaces:
        r = api_client.get_data('/threads?subject={}'.format(subject))
        assert len(r) == 1

        r = api_client.get_data('/messages?subject={}'.format(subject))
        assert len(r) == 1

        r = api_client.get_data('/files?filename={}'.format(subject))
        assert len(r) == 1
Example #2
def add_fake_thread(db_session, namespace_id):
    from inbox.models import Thread
    dt = datetime.utcnow()
    thr = Thread(subjectdate=dt, recentdate=dt, namespace_id=namespace_id)
    db_session.add(thr)
    db_session.commit()
    return thr
Example #3
def test_namespace_limiting(db, test_client):
    dt = datetime.datetime.utcnow()
    subject = dt.isoformat()
    db.session.add(Namespace())
    db.session.commit()
    namespaces = db.session.query(Namespace).all()
    assert len(namespaces) > 1
    for ns in namespaces:
        thread = Thread(namespace=ns, subjectdate=dt, recentdate=dt,
                        subject=subject)
        add_fake_message(db.session, ns.id, thread, received_date=dt,
                         subject=subject)
        db.session.add(Block(namespace=ns, filename=subject))
    db.session.commit()

    for ns in namespaces:
        r = json.loads(test_client.get('/n/{}/threads?subject={}'.
                                       format(ns.public_id, subject)).data)
        assert len(r) == 1

        r = json.loads(test_client.get('/n/{}/messages?subject={}'.
                                       format(ns.public_id, subject)).data)
        assert len(r) == 1

        r = json.loads(test_client.get('/n/{}/files?filename={}'.
                                       format(ns.public_id, subject)).data)
        assert len(r) == 1
Example #4
def threads(namespace_id, subject, from_addr, to_addr, cc_addr, bcc_addr,
            any_email, thread_public_id, started_before, started_after,
            last_message_before, last_message_after, filename, in_, unread,
            starred, limit, offset, view, db_session):

    if view == 'count':
        query = db_session.query(func.count(distinct(Thread.id)))
    elif view == 'ids':
        query = db_session.query(Thread.public_id)
    else:
        query = db_session.query(Thread)

    filters = _threads_filters(namespace_id, thread_public_id, started_before,
                               started_after, last_message_before,
                               last_message_after, subject)

    query = _threads_join_category(query, namespace_id, in_)
    query = query.filter(*filters)
    for subquery in _threads_subqueries(namespace_id, from_addr, to_addr,
                                        cc_addr, bcc_addr, any_email, filename,
                                        unread, starred, db_session):
        query = query.filter(Thread.id.in_(subquery))

    if view == 'count':
        return {"count": query.one()[0]}

    # Eager-load some objects in order to make constructing API
    # representations faster.
    if view != 'ids':
        expand = (view == 'expanded')
        query = query.options(*Thread.api_loading_options(expand))

    query = query.order_by(desc(Thread.recentdate)).limit(limit)

    if offset:
        query = query.offset(offset)

    if view == 'ids':
        return [x[0] for x in query.all()]

    return query.all()
Example #5
def format_transactions_after_pointer(namespace,
                                      pointer,
                                      db_session,
                                      result_limit,
                                      exclude_types=None,
                                      include_types=None,
                                      exclude_folders=True,
                                      expand=False):
    """
    Return a pair (deltas, new_pointer), where deltas is a list of change
    events, represented as dictionaries:
    {
      "object": <API object type, e.g. "thread">,
      "event": <"create", "modify", or "delete>,
      "attributes": <API representation of the object for insert/update events>
      "cursor": <public_id of the transaction>
    }

    and new_pointer is the integer id of the last included transaction

    Arguments
    ---------
    namespace: Namespace
        Namespace for which to get changes.
    pointer: int
        Process transactions starting after this id.
    db_session: new_session
        database session
    result_limit: int
        Maximum number of results to return. (Because we may roll up multiple
        changes to the same object, fewer results can be returned.)
    exclude_types: list, optional
        If given, don't include transactions for these types of objects.

    """
    exclude_types = set(exclude_types) if exclude_types else set()
    # Begin backwards-compatibility shim -- suppress new object types for now,
    # because clients may not be able to deal with them.
    exclude_types.add('account')
    if exclude_folders is True:
        exclude_types.update(('folder', 'label'))
    # End backwards-compatibility shim.

    last_trx = _get_last_trx_id_for_namespace(namespace.id, db_session)
    if last_trx == pointer:
        return ([], pointer)

    while True:
        # deleted_at condition included to allow this query to be satisfied via
        # the legacy index on (namespace_id, deleted_at) for performance.
        # Also need to explicitly specify the index hint because the query
        # planner is dumb as nails and otherwise would make this super slow for
        # some values of namespace_id and pointer.
        # TODO(emfree): Remove this hack and ensure that the right index (on
        # namespace_id only) exists.
        transactions = db_session.query(Transaction). \
            filter(
                Transaction.id > pointer,
                Transaction.namespace_id == namespace.id,
                Transaction.deleted_at.is_(None)). \
            with_hint(Transaction, 'USE INDEX (namespace_id_deleted_at)')

        if exclude_types is not None:
            transactions = transactions.filter(
                ~Transaction.object_type.in_(exclude_types))

        if include_types is not None:
            transactions = transactions.filter(
                Transaction.object_type.in_(include_types))

        transactions = transactions. \
            order_by(asc(Transaction.id)).limit(result_limit).all()

        if not transactions:
            return ([], pointer)

        results = []

        # Group deltas by object type.
        trxs_by_obj_type = collections.defaultdict(list)
        for trx in transactions:
            trxs_by_obj_type[trx.object_type].append(trx)

        for obj_type, trxs in trxs_by_obj_type.items():
            # Build a dictionary mapping pairs (record_id, command) to
            # transaction. If successive modifies for a given record id appear
            # in the list of transactions, this will only keep the latest
            # one (which is what we want).
            latest_trxs = {(trx.record_id, trx.command): trx
                           for trx in sorted(trxs, key=lambda t: t.id)
                           }.values()
            # Load all referenced not-deleted objects.
            ids_to_query = [
                trx.record_id for trx in latest_trxs if trx.command != 'delete'
            ]

            object_cls = transaction_objects()[obj_type]
            query = db_session.query(object_cls).filter(
                object_cls.id.in_(ids_to_query),
                object_cls.namespace_id == namespace.id)
            if object_cls == Thread:
                query = query.options(*Thread.api_loading_options(expand))
            elif object_cls == Message:
                query = query.options(*Message.api_loading_options(expand))
            objects = {obj.id: obj for obj in query}

            for trx in latest_trxs:
                delta = {
                    'object': trx.object_type,
                    'event': EVENT_NAME_FOR_COMMAND[trx.command],
                    'id': trx.object_public_id,
                    'cursor': trx.public_id
                }
                if trx.command != 'delete':
                    obj = objects.get(trx.record_id)
                    if obj is None:
                        continue
                    repr_ = encode(obj,
                                   namespace_public_id=namespace.public_id,
                                   expand=expand)
                    delta['attributes'] = repr_

                results.append((trx.id, delta))

        if results:
            # Sort deltas by id of the underlying transactions.
            results.sort()
            deltas = [d for _, d in results]
            return (deltas, results[-1][0])
        else:
            # It's possible that none of the referenced objects exist any more,
            # meaning the result list is empty. In that case, keep traversing
            # the log until we get actual results or reach the end.
            pointer = transactions[-1].id
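
A minimal sketch of how a caller might consume the (deltas, new_pointer) contract above, polling until it is caught up. The drain_deltas wrapper and the handle_delta callback are illustrative assumptions, not part of the project code.

def drain_deltas(namespace, db_session, pointer, handle_delta, batch_size=100):
    # Illustrative only: repeatedly page through the transaction log,
    # advancing `pointer` to the id of the last transaction returned.
    while True:
        deltas, new_pointer = format_transactions_after_pointer(
            namespace, pointer, db_session, batch_size)
        if not deltas:
            # Nothing newer than `pointer`; the caller is caught up.
            return pointer
        for delta in deltas:
            # Each delta carries 'object', 'event', 'id', 'cursor', and, for
            # create/modify events, 'attributes'.
            handle_delta(delta)
        pointer = new_pointer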
Example #6
def threads(
    namespace_id,
    subject,
    from_addr,
    to_addr,
    cc_addr,
    bcc_addr,
    any_email,
    message_id_header,
    thread_public_id,
    started_before,
    started_after,
    last_message_before,
    last_message_after,
    filename,
    in_,
    unread,
    starred,
    limit,
    offset,
    view,
    db_session,
):

    if view == "count":
        query = db_session.query(func.count(Thread.id))
    elif view == "ids":
        query = db_session.query(Thread.public_id)
    else:
        query = db_session.query(Thread)

    filters = [Thread.namespace_id == namespace_id,
               Thread.deleted_at.is_(None)]
    if thread_public_id is not None:
        filters.append(Thread.public_id == thread_public_id)

    if started_before is not None:
        filters.append(Thread.subjectdate < started_before)

    if started_after is not None:
        filters.append(Thread.subjectdate > started_after)

    if last_message_before is not None:
        filters.append(Thread.recentdate < last_message_before)

    if last_message_after is not None:
        filters.append(Thread.recentdate > last_message_after)

    if subject is not None:
        filters.append(Thread.subject == subject)

    query = query.filter(*filters)

    if from_addr is not None:
        from_query = contact_subquery(db_session, namespace_id, from_addr,
                                      "from_addr")
        query = query.filter(Thread.id.in_(from_query))

    if to_addr is not None:
        to_query = contact_subquery(db_session, namespace_id, to_addr,
                                    "to_addr")
        query = query.filter(Thread.id.in_(to_query))

    if cc_addr is not None:
        cc_query = contact_subquery(db_session, namespace_id, cc_addr,
                                    "cc_addr")
        query = query.filter(Thread.id.in_(cc_query))

    if bcc_addr is not None:
        bcc_query = contact_subquery(db_session, namespace_id, bcc_addr,
                                     "bcc_addr")
        query = query.filter(Thread.id.in_(bcc_query))

    if any_email is not None:
        any_contact_query = (db_session.query(
            Message.thread_id).join(MessageContactAssociation).join(
                Contact,
                MessageContactAssociation.contact_id == Contact.id).filter(
                    Contact.email_address.in_(any_email),
                    Contact.namespace_id == namespace_id,
                ).subquery())
        query = query.filter(Thread.id.in_(any_contact_query))

    if message_id_header is not None:
        message_id_query = db_session.query(Message.thread_id).filter(
            Message.message_id_header == message_id_header)
        query = query.filter(Thread.id.in_(message_id_query))

    if filename is not None:
        files_query = (db_session.query(
            Message.thread_id).join(Part).join(Block).filter(
                Block.filename == filename,
                Block.namespace_id == namespace_id).subquery())
        query = query.filter(Thread.id.in_(files_query))

    if in_ is not None:
        category_filters = [Category.name == in_, Category.display_name == in_]
        try:
            valid_public_id(in_)
            category_filters.append(Category.public_id == in_)
        except InputError:
            pass
        category_query = (db_session.query(
            Message.thread_id).prefix_with("STRAIGHT_JOIN").join(
                Message.messagecategories).join(
                    MessageCategory.category).filter(
                        Category.namespace_id == namespace_id,
                        or_(*category_filters)).subquery())
        query = query.filter(Thread.id.in_(category_query))

    if unread is not None:
        read = not unread
        unread_query = (db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_read == read).subquery())
        query = query.filter(Thread.id.in_(unread_query))

    if starred is not None:
        starred_query = (db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_starred == starred).subquery())
        query = query.filter(Thread.id.in_(starred_query))

    if view == "count":
        return {"count": query.one()[0]}

    # Eager-load some objects in order to make constructing API
    # representations faster.
    if view != "ids":
        expand = view == "expanded"
        query = query.options(*Thread.api_loading_options(expand))

    query = query.order_by(desc(Thread.recentdate)).limit(limit)

    if offset:
        query = query.offset(offset)

    if view == "ids":
        return [x[0] for x in query.all()]

    return query.all()
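
All of the filter arguments above are required positional parameters, so callers typically pass None for the ones they do not need. A minimal call sketch, assuming a SQLAlchemy db_session and a namespace_id are already in scope:

from datetime import datetime, timedelta

# Illustrative only: count unread threads with activity in the last week.
result = threads(
    namespace_id=namespace_id, subject=None, from_addr=None, to_addr=None,
    cc_addr=None, bcc_addr=None, any_email=None, message_id_header=None,
    thread_public_id=None, started_before=None, started_after=None,
    last_message_before=None,
    last_message_after=datetime.utcnow() - timedelta(days=7),
    filename=None, in_=None, unread=True, starred=None,
    limit=100, offset=0, view="count", db_session=db_session)
# result == {"count": <number of matching threads>}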
Example #7
def format_transactions_after_pointer(namespace, pointer, db_session,
                                      result_limit, exclude_types=None,
                                      include_types=None, exclude_folders=True,
                                      exclude_metadata=True, exclude_account=True,
                                      expand=False):
    """
    Return a pair (deltas, new_pointer), where deltas is a list of change
    events, represented as dictionaries:
    {
      "object": <API object type, e.g. "thread">,
      "event": <"create", "modify", or "delete>,
      "attributes": <API representation of the object for insert/update events>
      "cursor": <public_id of the transaction>
    }

    and new_pointer is the integer id of the last included transaction

    Arguments
    ---------
    namespace: Namespace
        Namespace for which to get changes.
    pointer: int
        Process transactions starting after this id.
    db_session: new_session
        database session
    result_limit: int
        Maximum number of results to return. (Because we may roll up multiple
        changes to the same object, fewer results can be returned.)
    exclude_types: list, optional
        If given, don't include transactions for these types of objects.

    """
    exclude_types = set(exclude_types) if exclude_types else set()
    # Begin backwards-compatibility shim -- suppress new object types for now,
    # because clients may not be able to deal with them.
    if exclude_folders is True:
        exclude_types.update(('folder', 'label'))
    if exclude_account is True:
        exclude_types.add('account')
    # End backwards-compatibility shim.

    # Metadata is excluded by default, and can only be included by setting the
    # exclude_metadata flag to False. If listed in include_types, remove it.
    if exclude_metadata is True:
        exclude_types.add('metadata')
    if include_types is not None and 'metadata' in include_types:
        include_types.remove('metadata')

    last_trx = _get_last_trx_id_for_namespace(namespace.id, db_session)
    if last_trx == pointer:
        return ([], pointer)

    while True:
        # deleted_at condition included to allow this query to be satisfied via
        # the legacy index on (namespace_id, deleted_at) for performance.
        # Also need to explicitly specify the index hint because the query
        # planner is dumb as nails and otherwise would make this super slow for
        # some values of namespace_id and pointer.
        # TODO(emfree): Remove this hack and ensure that the right index (on
        # namespace_id only) exists.
        transactions = db_session.query(Transaction). \
            filter(
                Transaction.id > pointer,
                Transaction.namespace_id == namespace.id,
                Transaction.deleted_at.is_(None)). \
            with_hint(Transaction, 'USE INDEX (namespace_id_deleted_at)')

        if exclude_types is not None:
            transactions = transactions.filter(
                ~Transaction.object_type.in_(exclude_types))

        if include_types is not None:
            transactions = transactions.filter(
                Transaction.object_type.in_(include_types))

        transactions = transactions. \
            order_by(asc(Transaction.id)).limit(result_limit).all()

        if not transactions:
            return ([], pointer)

        results = []

        # Group deltas by object type.
        trxs_by_obj_type = collections.defaultdict(list)
        for trx in transactions:
            trxs_by_obj_type[trx.object_type].append(trx)

        for obj_type, trxs in trxs_by_obj_type.items():
            # Build a dictionary mapping pairs (record_id, command) to
            # transaction. If successive modifies for a given record id appear
            # in the list of transactions, this will only keep the latest
            # one (which is what we want).
            latest_trxs = {(trx.record_id, trx.command): trx for trx in
                           sorted(trxs, key=lambda t: t.id)}.values()
            # Load all referenced not-deleted objects.
            ids_to_query = [trx.record_id for trx in latest_trxs
                            if trx.command != 'delete']

            object_cls = transaction_objects()[obj_type]

            if object_cls == Account:
                # The base query for Account queries the /Namespace/ table
                # since the API-returned "`account`" is a `namespace`
                # under-the-hood.
                query = db_session.query(Namespace).join(Account).filter(
                    Account.id.in_(ids_to_query),
                    Namespace.id == namespace.id)

                # Key by /namespace.account_id/ --
                # namespace.id may not be equal to account.id
                # and trx.record_id == account.id for `account` trxs.
                objects = {obj.account_id: obj for obj in query}
            else:
                query = db_session.query(object_cls).filter(
                    object_cls.id.in_(ids_to_query),
                    object_cls.namespace_id == namespace.id)

                if object_cls == Thread:
                    query = query.options(*Thread.api_loading_options(expand))
                elif object_cls == Message:
                    query = query.options(*Message.api_loading_options(expand))

                objects = {obj.id: obj for obj in query}

            for trx in latest_trxs:
                delta = {
                    'object': trx.object_type,
                    'event': EVENT_NAME_FOR_COMMAND[trx.command],
                    'id': trx.object_public_id,
                    'cursor': trx.public_id
                }
                if trx.command != 'delete':
                    obj = objects.get(trx.record_id)
                    if obj is None:
                        continue
                    repr_ = encode(
                        obj, namespace_public_id=namespace.public_id,
                        expand=expand)
                    delta['attributes'] = repr_

                results.append((trx.id, delta))

        if results:
            # Sort deltas by id of the underlying transactions.
            results.sort()
            deltas = [d for _, d in results]
            return (deltas, results[-1][0])
        else:
            # It's possible that none of the referenced objects exist any more,
            # meaning the result list is empty. In that case, keep traversing
            # the log until we get actual results or reach the end.
            pointer = transactions[-1].id
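
To narrow the stream to specific object types while keeping the default folder/metadata/account exclusions, a caller can pass include_types. A minimal sketch, assuming namespace, pointer, and db_session are already in scope:

# Illustrative only: fetch up to 50 thread and message deltas after `pointer`,
# with expanded API representations of the referenced objects.
deltas, new_pointer = format_transactions_after_pointer(
    namespace, pointer, db_session, 50,
    include_types=['thread', 'message'], expand=True)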
Example #8
def create_and_save_draft(
    db_session,
    account,
    to_addr=None,
    subject=None,
    body=None,
    blocks=None,
    cc_addr=None,
    bcc_addr=None,
    new_tags=None,
    thread=None,
    is_reply=False,
    parent_draft=None,
):
    """
    Create a draft object and commit it to the database.
    """
    dt = datetime.utcnow()
    uid = generate_public_id()
    to_addr = to_addr or []
    cc_addr = cc_addr or []
    bcc_addr = bcc_addr or []
    blocks = blocks or []
    body = body or ""
    if subject is None and thread is not None:
        # Set subject from thread by default.
        subject = thread.subject
    subject = subject or ""
    message = SpoolMessage()
    message.from_addr = [(account.sender_name, account.email_address)]
    message.created_date = dt
    # TODO(emfree): we should maybe make received_date nullable, so its value
    # doesn't change in the case of a drafted-and-later-reconciled message.
    message.received_date = dt
    message.is_sent = False
    message.state = "draft"
    if parent_draft is not None:
        message.parent_draft_id = parent_draft.id
    message.subject = subject
    message.sanitized_body = body
    message.to_addr = to_addr
    message.cc_addr = cc_addr
    message.bcc_addr = bcc_addr
    # TODO(emfree): this is different from the normal 'size' value of a
    # message, which is the size of the entire MIME message.
    message.size = len(body)
    message.is_draft = True
    message.is_read = True
    message.inbox_uid = uid
    message.public_id = uid

    # Set the snippet
    message.calculate_html_snippet(body)

    # Associate attachments to the draft message
    for block in blocks:
        # Create a new Part object to associate to the message object.
        # (You can't just set block.message, because if block is an attachment
        # on an existing message, that would dissociate it from the existing
        # message.)
        part = Part()
        part.namespace_id = account.namespace.id
        part.content_disposition = "attachment"
        part.content_type = block.content_type
        part.is_inboxapp_attachment = True
        part.data = block.data
        message.parts.append(part)
        db_session.add(part)

    # TODO(emfree) Update contact data here.

    if is_reply:
        message.is_reply = True
        # If we're updating a draft, copy the in-reply-to and references
        # headers from the parent. Otherwise, construct them from the last
        # message currently in the thread.
        if parent_draft is not None:
            message.in_reply_to = parent_draft.in_reply_to
            message.references = parent_draft.references
        else:
            # Make sure that the headers are constructed from an actual
            # previous message on the thread, not another draft
            non_draft_messages = [m for m in thread.messages if not m.is_draft]
            if non_draft_messages:
                last_message = non_draft_messages[-1]
                message.in_reply_to = last_message.message_id_header
                message.references = (last_message.references + "\t" +
                                      last_message.message_id_header)
    if thread is None:
        # Create a new thread object for the draft.
        thread = Thread(
            subject=message.subject,
            recentdate=message.received_date,
            namespace=account.namespace,
            subjectdate=message.received_date,
        )
        db_session.add(thread)

    message.thread = thread
    # This triggers an autoflush, so we need to execute it after setting
    # message.thread
    thread.apply_tag(account.namespace.tags["drafts"])

    if new_tags:
        tags_to_keep = {tag for tag in thread.tags if not tag.user_created}
        thread.tags = new_tags | tags_to_keep

    schedule_action("save_draft", message, message.namespace.id, db_session)

    db_session.add(message)
    db_session.commit()
    return message
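
A minimal usage sketch for the version above, assuming a db_session and an account with an attached namespace are available; the to_addr format mirrors the (name, email) pairs used for from_addr in the code.

# Illustrative only: create a standalone draft on a brand-new thread.
draft = create_and_save_draft(
    db_session, account,
    to_addr=[("Ben Bitdiddle", "ben.bitdiddle@example.com")],
    subject="Status update",
    body="<p>Draft body goes here.</p>")
assert draft.is_draft and draft.state == "draft"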
Example #9
File: base.py  Project: nixon1333/inbox
def create_and_save_draft(db_session, account, to_addr=None, subject=None,
                          body=None, blocks=None, cc_addr=None, bcc_addr=None,
                          new_tags=None, thread=None, is_reply=False,
                          syncback=True):
    """Create a draft object and commit it to the database."""
    dt = datetime.utcnow()
    uid = generate_public_id()
    version = generate_public_id()
    to_addr = to_addr or []
    cc_addr = cc_addr or []
    bcc_addr = bcc_addr or []
    blocks = blocks or []
    body = body or ''
    if subject is None and thread is not None:
        # Set subject from thread by default.
        subject = thread.subject
    subject = subject or ''

    # Sets is_draft = True, state = 'draft'
    message = Message.create_draft_message()

    message.from_addr = [(account.sender_name, account.email_address)]
    # TODO(emfree): we should maybe make received_date nullable, so its value
    # doesn't change in the case of a drafted-and-later-reconciled message.
    message.received_date = dt
    message.subject = subject
    message.sanitized_body = body
    message.to_addr = to_addr
    message.cc_addr = cc_addr
    message.bcc_addr = bcc_addr
    # TODO(emfree): this is different from the normal 'size' value of a
    # message, which is the size of the entire MIME message.
    message.size = len(body)
    message.is_read = True
    message.is_sent = False
    message.is_reply = is_reply
    message.public_id = uid
    message.version = version
    message.inbox_uid = version

    # Set the snippet
    message.calculate_html_snippet(body)

    # Associate attachments to the draft message
    for block in blocks:
        # Create a new Part object to associate to the message object.
        # (You can't just set block.message, because if block is an attachment
        # on an existing message, that would dissociate it from the existing
        # message.)
        part = Part()
        part.namespace_id = account.namespace.id
        part.content_disposition = 'attachment'
        part.content_type = block.content_type
        part.is_inboxapp_attachment = True
        part.data = block.data
        part.filename = block.filename
        message.parts.append(part)
        db_session.add(part)

    # TODO(emfree) Update contact data here.

    if is_reply:
        message.is_reply = True
        # Construct the in-reply-to and references headers from the last
        # message currently in the thread.
        _set_reply_headers(message, thread)
    if thread is None:
        # Create a new thread object for the draft.
        thread = Thread(
            subject=message.subject,
            recentdate=message.received_date,
            namespace=account.namespace,
            subjectdate=message.received_date)
        db_session.add(thread)

    message.thread = thread
    # This triggers an autoflush, so we need to execute it after setting
    # message.thread
    thread.apply_tag(account.namespace.tags['drafts'])

    if new_tags:
        tags_to_keep = {tag for tag in thread.tags if not tag.user_created}
        thread.tags = new_tags | tags_to_keep

    if syncback:
        schedule_action('save_draft', message, message.namespace.id,
                        db_session)

    db_session.add(message)
    db_session.commit()
    return message
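
This version delegates reply-header construction to _set_reply_headers, whose body is not shown here. Based on the equivalent inline logic in Example #8, a plausible reconstruction (an assumption, not the project's actual helper) looks like:

def _set_reply_headers(message, thread):
    # Hypothetical sketch mirroring Example #8: build In-Reply-To/References
    # from the last non-draft message so replies never reference a draft.
    non_draft_messages = [m for m in thread.messages if not m.is_draft]
    if non_draft_messages:
        last_message = non_draft_messages[-1]
        message.in_reply_to = last_message.message_id_header
        message.references = (last_message.references + '\t' +
                              last_message.message_id_header)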
Example #10
def format_transactions_after_pointer(namespace,
                                      pointer,
                                      db_session,
                                      result_limit,
                                      exclude_types=None,
                                      include_types=None,
                                      exclude_folders=True,
                                      exclude_metadata=True,
                                      exclude_account=True,
                                      expand=False,
                                      is_n1=False):
    """
    Return a pair (deltas, new_pointer), where deltas is a list of change
    events, represented as dictionaries:
    {
      "object": <API object type, e.g. "thread">,
      "event": <"create", "modify", or "delete>,
      "attributes": <API representation of the object for insert/update events>
      "cursor": <public_id of the transaction>
    }

    and new_pointer is the integer id of the last included transaction

    Arguments
    ---------
    namespace: Namespace
        Namespace for which to get changes.
    pointer: int
        Process transactions starting after this id.
    db_session: new_session
        database session
    result_limit: int
        Maximum number of results to return. (Because we may roll up multiple
        changes to the same object, fewer results can be returned.)
    exclude_types: list, optional
        If given, don't include transactions for these types of objects.

    """
    exclude_types = set(exclude_types) if exclude_types else set()
    # Begin backwards-compatibility shim -- suppress new object types for now,
    # because clients may not be able to deal with them.
    if exclude_folders is True:
        exclude_types.update(('folder', 'label'))
    if exclude_account is True:
        exclude_types.add('account')
    # End backwards-compatibility shim.

    # Metadata is excluded by default, and can only be included by setting the
    # exclude_metadata flag to False. If listed in include_types, remove it.
    if exclude_metadata is True:
        exclude_types.add('metadata')
    if include_types is not None and 'metadata' in include_types:
        include_types.remove('metadata')

    last_trx = _get_last_trx_id_for_namespace(namespace.id, db_session)
    if last_trx == pointer:
        return ([], pointer)

    while True:
        transactions = db_session.query(Transaction). \
            filter(
                Transaction.id > pointer,
                Transaction.namespace_id == namespace.id)

        if exclude_types is not None:
            transactions = transactions.filter(
                ~Transaction.object_type.in_(exclude_types))

        if include_types is not None:
            transactions = transactions.filter(
                Transaction.object_type.in_(include_types))

        transactions = transactions. \
            order_by(asc(Transaction.id)).limit(result_limit).all()

        if not transactions:
            return ([], pointer)

        results = []

        # Group deltas by object type.
        trxs_by_obj_type = collections.defaultdict(list)
        for trx in transactions:
            trxs_by_obj_type[trx.object_type].append(trx)

        for obj_type, trxs in trxs_by_obj_type.items():
            # Build a dictionary mapping pairs (record_id, command) to
            # transaction. If successive modifies for a given record id appear
            # in the list of transactions, this will only keep the latest
            # one (which is what we want).
            latest_trxs = {(trx.record_id, trx.command): trx
                           for trx in sorted(trxs, key=lambda t: t.id)
                           }.values()
            # Load all referenced not-deleted objects.
            ids_to_query = [
                trx.record_id for trx in latest_trxs if trx.command != 'delete'
            ]

            object_cls = transaction_objects()[obj_type]

            if object_cls == Account:
                # The base query for Account queries the /Namespace/ table
                # since the API-returned "`account`" is a `namespace`
                # under-the-hood.
                query = db_session.query(Namespace).join(Account).filter(
                    Account.id.in_(ids_to_query), Namespace.id == namespace.id)

                # Key by /namespace.account_id/ --
                # namespace.id may not be equal to account.id
                # and trx.record_id == account.id for `account` trxs.
                objects = {obj.account_id: obj for obj in query}
            else:
                query = db_session.query(object_cls).filter(
                    object_cls.id.in_(ids_to_query),
                    object_cls.namespace_id == namespace.id)

                if object_cls == Thread:
                    query = query.options(*Thread.api_loading_options(expand))
                elif object_cls == Message:
                    query = query.options(*Message.api_loading_options(expand))

                objects = {obj.id: obj for obj in query}

            for trx in latest_trxs:
                delta = {
                    'object': trx.object_type,
                    'event': EVENT_NAME_FOR_COMMAND[trx.command],
                    'id': trx.object_public_id,
                    'cursor': trx.public_id
                }
                if trx.command != 'delete':
                    obj = objects.get(trx.record_id)
                    if obj is None:
                        continue
                    repr_ = encode(obj,
                                   namespace_public_id=namespace.public_id,
                                   expand=expand,
                                   is_n1=is_n1)
                    delta['attributes'] = repr_

                results.append((trx.id, delta))

        if results:
            # Sort deltas by id of the underlying transactions.
            results.sort()
            deltas = [d for _, d in results]
            return (deltas, results[-1][0])
        else:
            # It's possible that none of the referenced objects exist any more,
            # meaning the result list is empty. In that case, keep traversing
            # the log until we get actual results or reach the end.
            pointer = transactions[-1].id
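
The dict comprehension keyed on (record_id, command) in these functions collapses successive modifies of the same record down to the newest transaction, because later entries overwrite earlier ones. A standalone illustration with made-up stand-in records:

import collections

# Illustrative only: stand-in transactions with (id, record_id, command).
Trx = collections.namedtuple('Trx', ['id', 'record_id', 'command'])
trxs = [Trx(1, 7, 'update'), Trx(2, 7, 'update'), Trx(3, 8, 'delete')]

# Sorting by id first means the last write wins per (record_id, command) key,
# so only the newest modify of record 7 survives alongside the delete of 8.
latest = {(t.record_id, t.command): t
          for t in sorted(trxs, key=lambda t: t.id)}.values()
assert sorted(t.id for t in latest) == [2, 3]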
Example #11
def threads(namespace_id, subject, from_addr, to_addr, cc_addr, bcc_addr,
            any_email, thread_public_id, started_before, started_after,
            last_message_before, last_message_after, filename, in_, unread,
            starred, limit, offset, view, db_session):

    if view == 'count':
        query = db_session.query(func.count(Thread.id))
    elif view == 'ids':
        query = db_session.query(Thread.public_id)
    else:
        query = db_session.query(Thread)

    filters = [Thread.namespace_id == namespace_id]
    if thread_public_id is not None:
        filters.append(Thread.public_id == thread_public_id)

    if started_before is not None:
        filters.append(Thread.subjectdate < started_before)

    if started_after is not None:
        filters.append(Thread.subjectdate > started_after)

    if last_message_before is not None:
        filters.append(Thread.recentdate < last_message_before)

    if last_message_after is not None:
        filters.append(Thread.recentdate > last_message_after)

    if subject is not None:
        filters.append(Thread.subject == subject)

    query = query.filter(*filters)

    if from_addr is not None:
        from_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == from_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'from_addr').subquery()
        query = query.filter(Thread.id.in_(from_query))

    if to_addr is not None:
        to_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == to_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'to_addr').subquery()
        query = query.filter(Thread.id.in_(to_query))

    if cc_addr is not None:
        cc_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == cc_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'cc_addr').subquery()
        query = query.filter(Thread.id.in_(cc_query))

    if bcc_addr is not None:
        bcc_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == bcc_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'bcc_addr').subquery()
        query = query.filter(Thread.id.in_(bcc_query))

    if any_email is not None:
        any_contact_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact). \
            filter(Contact.email_address == any_email,
                   Contact.namespace_id == namespace_id).subquery()
        query = query.filter(Thread.id.in_(any_contact_query))

    if filename is not None:
        files_query = db_session.query(Message.thread_id). \
            join(Part).join(Block). \
            filter(Block.filename == filename,
                   Block.namespace_id == namespace_id). \
            subquery()
        query = query.filter(Thread.id.in_(files_query))

    if in_ is not None:
        category_filters = [Category.name == in_, Category.display_name == in_]
        try:
            valid_public_id(in_)
            category_filters.append(Category.public_id == in_)
        except InputError:
            pass
        category_query = db_session.query(Message.thread_id). \
            join(MessageCategory).join(Category). \
            filter(Category.namespace_id == namespace_id,
                   or_(*category_filters)).subquery()
        query = query.filter(Thread.id.in_(category_query))

    if unread is not None:
        read = not unread
        unread_query = db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_read == read).subquery()
        query = query.filter(Thread.id.in_(unread_query))

    if starred is not None:
        starred_query = db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_starred == starred).subquery()
        query = query.filter(Thread.id.in_(starred_query))

    if view == 'count':
        return {"count": query.one()[0]}

    # Eager-load some objects in order to make constructing API
    # representations faster.
    if view != 'ids':
        expand = (view == 'expanded')
        query = query.options(*Thread.api_loading_options(expand))

    query = query.order_by(desc(Thread.recentdate)).limit(limit)

    if offset:
        query = query.offset(offset)

    if view == 'ids':
        return [x[0] for x in query.all()]

    return query.all()
Example #12
def threads(namespace_id, subject, from_addr, to_addr, cc_addr, bcc_addr,
            any_email, thread_public_id, started_before, started_after,
            last_message_before, last_message_after, filename, in_, unread,
            starred, limit, offset, view, db_session):

    if view == 'count':
        query = db_session.query(func.count(Thread.id))
    elif view == 'ids':
        query = db_session.query(Thread.public_id)
    else:
        query = db_session.query(Thread)

    filters = [Thread.namespace_id == namespace_id]
    if thread_public_id is not None:
        filters.append(Thread.public_id == thread_public_id)

    if started_before is not None:
        filters.append(Thread.subjectdate < started_before)

    if started_after is not None:
        filters.append(Thread.subjectdate > started_after)

    if last_message_before is not None:
        filters.append(Thread.recentdate < last_message_before)

    if last_message_after is not None:
        filters.append(Thread.recentdate > last_message_after)

    if subject is not None:
        filters.append(Thread.subject == subject)

    query = query.filter(*filters)

    if from_addr is not None:
        from_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == from_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'from_addr').subquery()
        query = query.filter(Thread.id.in_(from_query))

    if to_addr is not None:
        to_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == to_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'to_addr').subquery()
        query = query.filter(Thread.id.in_(to_query))

    if cc_addr is not None:
        cc_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == cc_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'cc_addr').subquery()
        query = query.filter(Thread.id.in_(cc_query))

    if bcc_addr is not None:
        bcc_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact).filter(
                Contact.email_address == bcc_addr,
                Contact.namespace_id == namespace_id,
                MessageContactAssociation.field == 'bcc_addr').subquery()
        query = query.filter(Thread.id.in_(bcc_query))

    if any_email is not None:
        any_contact_query = db_session.query(Message.thread_id). \
            join(MessageContactAssociation).join(Contact). \
            filter(Contact.email_address.in_(any_email),
                   Contact.namespace_id == namespace_id).subquery()
        query = query.filter(Thread.id.in_(any_contact_query))

    if filename is not None:
        files_query = db_session.query(Message.thread_id). \
            join(Part).join(Block). \
            filter(Block.filename == filename,
                   Block.namespace_id == namespace_id). \
            subquery()
        query = query.filter(Thread.id.in_(files_query))

    if in_ is not None:
        category_filters = [Category.name == in_, Category.display_name == in_]
        try:
            valid_public_id(in_)
            category_filters.append(Category.public_id == in_)
        except InputError:
            pass
        category_query = db_session.query(Message.thread_id). \
            join(MessageCategory).join(Category). \
            filter(Category.namespace_id == namespace_id,
                   or_(*category_filters)).subquery()
        query = query.filter(Thread.id.in_(category_query))

    if unread is not None:
        read = not unread
        unread_query = db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_read == read).subquery()
        query = query.filter(Thread.id.in_(unread_query))

    if starred is not None:
        starred_query = db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_starred == starred).subquery()
        query = query.filter(Thread.id.in_(starred_query))

    if view == 'count':
        return {"count": query.one()[0]}

    # Eager-load some objects in order to make constructing API
    # representations faster.
    if view != 'ids':
        expand = (view == 'expanded')
        query = query.options(*Thread.api_loading_options(expand))

    query = query.order_by(desc(Thread.recentdate)).limit(limit)

    if offset:
        query = query.offset(offset)

    if view == 'ids':
        return [x[0] for x in query.all()]

    return query.all()
Example #13
def create_and_save_draft(db_session,
                          account,
                          to_addr=None,
                          subject=None,
                          body=None,
                          blocks=None,
                          cc_addr=None,
                          bcc_addr=None,
                          new_tags=None,
                          thread=None,
                          is_reply=False,
                          parent_draft=None,
                          syncback=True):
    """
    Create a draft object and commit it to the database.
    """
    dt = datetime.utcnow()
    uid = generate_public_id()
    to_addr = to_addr or []
    cc_addr = cc_addr or []
    bcc_addr = bcc_addr or []
    blocks = blocks or []
    body = body or ''
    if subject is None and thread is not None:
        # Set subject from thread by default.
        subject = thread.subject
    subject = subject or ''

    # Sets is_draft = True, state = 'draft'
    message = Message.create_draft_message()

    message.from_addr = [(account.sender_name, account.email_address)]
    # TODO(emfree): we should maybe make received_date nullable, so its value
    # doesn't change in the case of a drafted-and-later-reconciled message.
    message.received_date = dt
    if parent_draft is not None:
        message.parent_draft_id = parent_draft.id
    message.subject = subject
    message.sanitized_body = body
    message.to_addr = to_addr
    message.cc_addr = cc_addr
    message.bcc_addr = bcc_addr
    # TODO(emfree): this is different from the normal 'size' value of a
    # message, which is the size of the entire MIME message.
    message.size = len(body)
    message.is_read = True
    message.is_sent = False
    message.is_reply = is_reply
    message.inbox_uid = uid
    message.public_id = uid

    # Set the snippet
    message.calculate_html_snippet(body)

    # Associate attachments to the draft message
    for block in blocks:
        # Create a new Part object to associate to the message object.
        # (You can't just set block.message, because if block is an attachment
        # on an existing message, that would dissociate it from the existing
        # message.)
        part = Part()
        part.namespace_id = account.namespace.id
        part.content_disposition = 'attachment'
        part.content_type = block.content_type
        part.is_inboxapp_attachment = True
        part.data = block.data
        part.filename = block.filename
        message.parts.append(part)
        db_session.add(part)

    # TODO(emfree) Update contact data here.

    if is_reply:
        message.is_reply = True
        # If we're updating a draft, copy the in-reply-to and references
        # headers from the parent. Otherwise, construct them from the last
        # message currently in the thread.
        if parent_draft is not None:
            message.in_reply_to = parent_draft.in_reply_to
            message.references = parent_draft.references
        else:
            _set_reply_headers(message, thread)
    if thread is None:
        # Create a new thread object for the draft.
        thread = Thread(subject=message.subject,
                        recentdate=message.received_date,
                        namespace=account.namespace,
                        subjectdate=message.received_date)
        db_session.add(thread)

    message.thread = thread
    # This triggers an autoflush, so we need to execute it after setting
    # message.thread
    thread.apply_tag(account.namespace.tags['drafts'])

    if new_tags:
        tags_to_keep = {tag for tag in thread.tags if not tag.user_created}
        thread.tags = new_tags | tags_to_keep

    if syncback:
        schedule_action('save_draft', message, message.namespace.id,
                        db_session)

    db_session.add(message)
    db_session.commit()
    return message
Example #14
def threads(namespace_id,
            subject,
            from_addr,
            to_addr,
            cc_addr,
            bcc_addr,
            any_email,
            thread_public_id,
            started_before,
            started_after,
            last_message_before,
            last_message_after,
            filename,
            in_,
            unread,
            starred,
            limit,
            offset,
            view,
            db_session,
            sort_field='recentdate',
            sort_order='desc'):

    if view == 'count':
        query = db_session.query(func.count(Thread.id))
    elif view == 'ids':
        query = db_session.query(Thread.public_id)
    else:
        query = db_session.query(Thread)

    filters = [Thread.namespace_id == namespace_id,
               Thread.deleted_at.is_(None)]
    if thread_public_id is not None:
        filters.append(Thread.public_id == thread_public_id)

    if started_before is not None:
        filters.append(Thread.subjectdate < started_before)

    if started_after is not None:
        filters.append(Thread.subjectdate > started_after)

    if last_message_before is not None:
        filters.append(Thread.recentdate < last_message_before)

    if last_message_after is not None:
        filters.append(Thread.recentdate > last_message_after)

    if subject is not None:
        filters.append(Thread.subject == subject)

    query = query.filter(*filters)

    if from_addr is not None:
        from_query = contact_subquery(db_session, namespace_id, from_addr,
                                      'from_addr')
        query = query.filter(Thread.id.in_(from_query))

    if to_addr is not None:
        to_query = contact_subquery(db_session, namespace_id, to_addr,
                                    'to_addr')
        query = query.filter(Thread.id.in_(to_query))

    if cc_addr is not None:
        cc_query = contact_subquery(db_session, namespace_id, cc_addr,
                                    'cc_addr')
        query = query.filter(Thread.id.in_(cc_query))

    if bcc_addr is not None:
        bcc_query = contact_subquery(db_session, namespace_id, bcc_addr,
                                     'bcc_addr')
        query = query.filter(Thread.id.in_(bcc_query))

    if any_email is not None:
        any_contact_query = db_session.query(Message.thread_id) \
            .join(MessageContactAssociation) \
            .join(Contact, MessageContactAssociation.contact_id == Contact.id)\
            .filter(Contact.email_address.in_(any_email),
                    Contact.namespace_id == namespace_id)\
            .subquery()
        query = query.filter(Thread.id.in_(any_contact_query))

    if filename is not None:
        files_query = db_session.query(Message.thread_id). \
            join(Part).join(Block). \
            filter(Block.filename == filename,
                   Block.namespace_id == namespace_id). \
            subquery()
        query = query.filter(Thread.id.in_(files_query))

    if in_ is not None:
        category_filters = [Category.name == in_, Category.display_name == in_]
        try:
            valid_public_id(in_)
            category_filters.append(Category.public_id == in_)
        except InputError:
            pass
        category_query = db_session.query(Message.thread_id). \
            prefix_with('STRAIGHT_JOIN'). \
            join(Message.messagecategories).join(MessageCategory.category). \
            filter(Category.namespace_id == namespace_id,
                   or_(*category_filters)).subquery()
        query = query.filter(Thread.id.in_(category_query))

    if unread is not None:
        read = not unread
        unread_query = db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_read == read).subquery()
        query = query.filter(Thread.id.in_(unread_query))

    if starred is not None:
        starred_query = db_session.query(Message.thread_id).filter(
            Message.namespace_id == namespace_id,
            Message.is_starred == starred).subquery()
        query = query.filter(Thread.id.in_(starred_query))

    if view == 'count':
        return {"count": query.one()[0]}

    # Eager-load some objects in order to make constructing API
    # representations faster.
    if view != 'ids':
        expand = (view == 'expanded')
        query = query.options(*Thread.api_loading_options(expand))

    # Sort by field and direction.
    sort_field = sort_field.lower() if sort_field is not None else None
    sort_order = sort_order.lower() if sort_order is not None else None
    db_sort_field = Thread.recentdate
    db_sort_order = desc

    sort_field_mapping = {
        'subject': Thread.subject,
        'date': Thread.recentdate,
        'start_date': Thread.subjectdate
    }
    if sort_field in sort_field_mapping:
        db_sort_field = sort_field_mapping[sort_field]

    if sort_order == 'asc':
        db_sort_order = asc

    query = query.order_by(db_sort_order(db_sort_field)).limit(limit)

    if offset:
        query = query.offset(offset)

    if view == 'ids':
        return [x[0] for x in query.all()]

    return query.all()
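
This variant adds sort_field and sort_order, mapping API-level names onto Thread columns and falling back to recentdate descending. A minimal call sketch, again assuming namespace_id and db_session are in scope:

# Illustrative only: first 50 threads ordered by subject, ascending.
page = threads(
    namespace_id=namespace_id, subject=None, from_addr=None, to_addr=None,
    cc_addr=None, bcc_addr=None, any_email=None, thread_public_id=None,
    started_before=None, started_after=None, last_message_before=None,
    last_message_after=None, filename=None, in_=None, unread=None,
    starred=None, limit=50, offset=0, view=None, db_session=db_session,
    sort_field='subject', sort_order='asc')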