import json
import logging
import sys

from nylas.logging import configure_logging, get_logger


def test_out_of_scope_passed_error(logfile):
    """
    If an error is thrown out of band, ensure there's no error data.

    Unless we pass a value to the `error` field, in which case stuff it
    into `error_message`.
    """
    configure_logging()
    log = get_logger()

    log.error("0 test", exc_info=sys.exc_info())
    log.error("1 test", exc_info=True)
    log.error("2 test", error="OVERRIDDEN MESSAGE")
    log.error("3 test", error=100.0)
    log.error("4 test", error=True)
    log.warn("5 test", include_exception=True)
    log.info("6 test", include_exception=True)
    log.exception("7 test")

    lines = logfile.readlines()
    for i, line in enumerate(lines):
        out = json.loads(line)
        assert out['event'] == "{} test".format(i)
        assert 'error_name' not in out
        assert 'error_traceback' not in out
        if i == 2:
            assert out['error_message'] == "OVERRIDDEN MESSAGE"
        elif i == 3:
            assert out['error_message'] == 100.0
        elif i == 4:
            assert out['error_message'] is True
        else:
            assert 'error_message' not in out
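
# A minimal usage sketch of the behavior exercised above: with no active exception,
# passing `error=...` only populates `error_message` on the rendered JSON line;
# `error_name` and `error_traceback` stay absent.
configure_logging()
log = get_logger()
log.error("Oh no", error="something descriptive")
# -> {"event": "Oh no", "level": "error", "error_message": "something descriptive", ...}
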
def test_adjacent_error(logfile):
    """
    NOTE: we can only easily detect the exc_info traceback at the
    function level. This means that while we are protected from the test
    case above, we will still include exception traces from adjacent
    try/except blocks within the same function call.

    Since fixing this would likely require parsing an AST or compromising
    on our default exception-including behavior, and this fairly rare
    case would at worst lead people to the wrong part of the same
    function, we leave this as a known limitation.
    """
    configure_logging()
    log = get_logger()

    try:
        raise ValueError("Test message")
    except ValueError:
        pass

    log.exception("Oh no")
    out = json.loads(logfile.readlines()[0])
    assert out['event'] == "Oh no"
    assert out['level'] == "error"
    assert out['error_name'] == "ValueError"
    assert out['error_message'] == "Test message"
    assert 'error_traceback' in out
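
# If the lingering traceback from an adjacent, already-handled exception is not
# wanted, the include_exception=False flag (exercised in test_exclude_exception
# below) opts out of exception capture; a sketch using the same setup as the tests:
configure_logging()
log = get_logger()

try:
    raise ValueError("handled earlier in the same block")
except ValueError:
    pass

log.error("Oh no", include_exception=False)  # no error_* fields attached
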
def test_out_of_scope_exception(logfile):
    configure_logging()
    log = get_logger()

    log.exception("Oh no")
    out = json.loads(logfile.readlines()[0])
    assert out['event'] == "Oh no"
    assert out['level'] == "error"
    assert 'error_name' not in out
    assert 'error_message' not in out
    assert 'error_traceback' not in out
def test_standard_error(logfile):
    configure_logging()
    log = get_logger()

    log.error("Oh no")
    out = json.loads(logfile.readlines()[0])
    assert out['event'] == "Oh no"
    assert out['level'] == "error"
    assert 'error_name' not in out
    assert 'error_message' not in out
    assert 'error_traceback' not in out
    assert 'error' not in out
    assert 'exc_info' not in out
    assert 'exception' not in out
def test_basic_log(logfile):
    configure_logging()
    log = get_logger()

    log.info("Hi")
    out = json.loads(logfile.readlines()[0])
    assert out['event'] == "Hi"
    assert out['level'] == "info"
    assert 'error_name' not in out
    assert 'error_message' not in out
    assert 'error_traceback' not in out
    assert 'error' not in out
    assert 'exc_info' not in out
    assert 'exception' not in out
def test_lingering_error(logfile):
    configure_logging()
    log = get_logger()

    def some_random_fn():
        try:
            raise ValueError("Test message")
        except ValueError:
            pass

    # Call the helper so the exception is raised and handled in another function.
    some_random_fn()

    log.error("Oh no")
    out = json.loads(logfile.readlines()[0])
    assert out['event'] == "Oh no"
    assert out['level'] == "error"
    assert 'error_name' not in out
    assert 'error_message' not in out
    assert 'error_traceback' not in out
def test_exclude_exception(logfile):
    configure_logging()
    log = get_logger()

    try:
        raise ValueError("Test message")
    except ValueError:
        log.error("Oh no", include_exception=False)
        out = json.loads(logfile.readlines()[0])
        assert out['event'] == "Oh no"
        assert out['level'] == "error"
        assert 'error_name' not in out
        assert 'error_message' not in out
        assert 'error_traceback' not in out
        assert 'error' not in out
        assert 'exc_info' not in out
        assert 'exception' not in out
def test_configure_logging():
    root_logger = logging.getLogger()
    configure_logging(log_level="error")
    assert root_logger.getEffectiveLevel() == logging.ERROR
    configure_logging(log_level="critical")
    assert root_logger.getEffectiveLevel() == logging.CRITICAL
    configure_logging(log_level=10)
    assert root_logger.getEffectiveLevel() == logging.DEBUG
    configure_logging()
    assert root_logger.getEffectiveLevel() == logging.INFO
def test_code(logfile):
    configure_logging()
    log = get_logger()

    class CodeError(Exception):
        def __init__(self, msg, code):
            self.message = msg
            self.code = code

    try:
        raise CodeError("Custom message", 404)
    except CodeError:
        log.error("Oh no")
        out = json.loads(logfile.readlines()[0])
        assert out['event'] == "Oh no"
        assert out['level'] == "error"
        assert out['error_name'] == "CodeError"
        assert out['error_message'] == "Custom message"
        assert 'error_traceback' in out
        assert out['error_code'] == 404
        assert 'error' not in out
        assert 'exc_info' not in out
        assert 'exception' not in out
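
# The test above shows that a `code` attribute on the active exception surfaces as
# error_code alongside error_name/error_message. An application-level sketch (the
# exception class and values below are illustrative, not part of the library):
configure_logging()
log = get_logger()


class UpstreamError(Exception):
    def __init__(self, msg, code):
        super(UpstreamError, self).__init__(msg)
        self.message = msg
        self.code = code


try:
    raise UpstreamError("gateway timeout", 504)
except UpstreamError:
    # Expect error_name="UpstreamError", error_message="gateway timeout",
    # error_code=504 on the emitted JSON line.
    log.error("Oh no")
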
def test_alternative_arg_errors(logfile):
    """
    Allow a user to pass an object to `error=err_obj`
    Allow a user to pass a string to `error='foo'`
    Also allow `exc_info=True`
    """
    configure_logging()
    log = get_logger()

    try:
        raise ValueError("Test message")
    except ValueError as err_obj:
        log.exception("0 test")
        log.error("1 test")
        log.warn("2 test", include_exception=True)
        log.info("3 test", include_exception=True)
        log.error("4 test", error=err_obj)
        log.error("5 test", exc_info=sys.exc_info())
        log.error("6 test", exc_info=True)
        log.error("7 test", error="OVERRIDDEN MESSAGE")
        log.error("8 test", error=100.0)
        log.error("9 test", error=True)

    lines = logfile.readlines()
    for i, line in enumerate(lines):
        out = json.loads(line)
        assert out['event'] == "{} test".format(i)
        assert out['error_name'] == "ValueError"
        if i == 7:
            assert out['error_message'] == "OVERRIDDEN MESSAGE"
        elif i == 8:
            assert out['error_message'] == 100.0
        elif i == 9:
            assert out['error_message'] is True
        else:
            assert out['error_message'] == "Test message"
        assert 'error_traceback' in out
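
# Distilled from the docstring and assertions above: inside an except block, any of
# these forms attach error_name and error_traceback, and a string passed as error=
# overrides error_message.
configure_logging()
log = get_logger()

try:
    raise ValueError("Test message")
except ValueError as err_obj:
    log.error("Oh no", error=err_obj)         # pass the exception object
    log.error("Oh no", error="why it broke")  # string overrides error_message
    log.error("Oh no", exc_info=True)         # stdlib-style exc_info flag
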
Example #12
    def __init__(self, cfg):
        gunicorn.glogging.Logger.__init__(self, cfg)
        configure_logging(log_level=LOGLEVEL)
        self.error_log = log
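
# The method above belongs to a gunicorn logger subclass whose class statement was
# not captured. A minimal sketch of the assumed wiring (the class name, LOGLEVEL
# value, and module path are illustrative, not taken from the project):
import gunicorn.glogging
from nylas.logging import configure_logging, get_logger

LOGLEVEL = "info"  # assumed; typically read from the environment or app config
log = get_logger()


class GunicornLogger(gunicorn.glogging.Logger):
    def __init__(self, cfg):
        gunicorn.glogging.Logger.__init__(self, cfg)
        configure_logging(log_level=LOGLEVEL)
        self.error_log = log

# gunicorn would then be pointed at this class, e.g.:
#   gunicorn --logger-class myapp.logging.GunicornLogger app:application
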
#!/usr/bin/env python
# We previously didn't store IMAP path separators for generic imap accounts.
# This script backfixes the accounts.
import click

from inbox.crispin import connection_pool

from nylas.logging import get_logger, configure_logging
from inbox.models.backends.generic import GenericAccount
from inbox.models.session import (session_scope, global_session_scope,
                                  session_scope_by_shard_id)

configure_logging()
log = get_logger(purpose='separator-backfix')


@click.command()
@click.option('--min-id', type=int, default=None)
@click.option('--max-id', type=int, default=None)
@click.option('--shard-id', type=int, default=None)
def main(min_id, max_id, shard_id):
    generic_accounts = []
    failed = []

    if min_id is not None or max_id is not None:
        # Get the list of running generic IMAP accounts.
        with global_session_scope() as db_session:
            generic_accounts = db_session.query(GenericAccount).filter(
                GenericAccount.sync_state == 'running')

            if min_id is not None:
Example #14
import pytest
import json
import time
from datetime import datetime, timedelta

from inbox.heartbeat.store import (HeartbeatStore, HeartbeatStatusProxy,
                                   HeartbeatStatusKey)
from inbox.heartbeat.status import (clear_heartbeat_status, get_ping_status)
import inbox.heartbeat.config as heartbeat_config
from inbox.heartbeat.config import ALIVE_EXPIRY
from inbox.config import config

from nylas.logging import configure_logging
configure_logging(config.get('LOGLEVEL'))

from mockredis import MockRedis
# Note that all Redis commands are mocked via mockredis in conftest.py.


def proxy_for(account_id,
              folder_id,
              email='*****@*****.**',
              provider='gmail',
              device_id=0):
    return HeartbeatStatusProxy(account_id=account_id,
                                folder_id=folder_id,
                                folder_name="Inbox",
                                email_address=email,
                                provider_name=provider,
                                device_id=device_id)
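
# The note above refers to a conftest.py (not shown) where Redis commands are mocked
# via mockredis. A hypothetical sketch of such a fixture; the patched attribute name
# get_redis_client is an assumption, not taken from the inbox codebase:
import pytest
from mockredis import MockRedis


@pytest.fixture(autouse=True)
def mocked_redis(monkeypatch):
    import inbox.heartbeat.config as heartbeat_config
    # Assumed hook: swap whatever Redis client factory the heartbeat code uses.
    monkeypatch.setattr(heartbeat_config, "get_redis_client",
                        lambda *args, **kwargs: MockRedis(), raising=False)
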
Revises: 23e204cd1d91
Create Date: 2015-07-09 00:23:04.918833

"""

# revision identifiers, used by Alembic.
revision = "334b33f18b4f"
down_revision = "23e204cd1d91"

from nylas.logging import configure_logging, get_logger
from sqlalchemy import asc
from sqlalchemy.orm import joinedload, load_only, subqueryload

from inbox.config import config

configure_logging(config.get("LOGLEVEL"))
log = get_logger()


def populate_labels(uid, account, db_session):
    from inbox.models import Label

    existing_labels = {(l.name, l.canonical_name): l for l in account.labels}
    uid.is_draft = "\\Draft" in uid.g_labels
    uid.is_starred = "\\Starred" in uid.g_labels

    category_map = {
        "\\Inbox": "inbox",
        "\\Important": "important",
        "\\Sent": "sent"
    }
Example #17
from inbox.heartbeat.status import (
    clear_heartbeat_status,
    list_all_accounts,
    list_alive_accounts,
    list_dead_accounts,
    heartbeat_summary,
    get_account_metadata,
    get_ping_status,
    AccountHeartbeatStatus,
)
from inbox.heartbeat.store import HeartbeatStatusProxy
from inbox.heartbeat.config import ALIVE_EXPIRY
from inbox.config import config

from nylas.logging import configure_logging

configure_logging(config.get("LOGLEVEL"))

from mockredis import MockRedis

# Note that all Redis commands are mocked via mockredis in conftest.py.


def proxy_for(account_id, folder_id, email="*****@*****.**", provider="gmail", device_id=0):
    return HeartbeatStatusProxy(
        account_id=account_id,
        folder_id=folder_id,
        folder_name="Inbox",
        email_address=email,
        provider_name=provider,
        device_id=device_id,
    )
Example #18
import pytest
import json
import time
from datetime import datetime, timedelta

from inbox.heartbeat.store import (HeartbeatStore, HeartbeatStatusProxy,
                                   HeartbeatStatusKey)
from inbox.heartbeat.status import (clear_heartbeat_status,
                                    get_ping_status)
from inbox.heartbeat.config import ALIVE_EXPIRY
from inbox.config import config

from nylas.logging import configure_logging
configure_logging(config.get('LOGLEVEL'))

from mockredis import MockRedis
# Note that all Redis commands are mocked via mockredis in conftest.py.


def proxy_for(account_id, folder_id, email='*****@*****.**', provider='gmail',
              device_id=0):
    return HeartbeatStatusProxy(account_id=account_id, folder_id=folder_id,
                                folder_name="Inbox",
                                email_address=email,
                                provider_name=provider,
                                device_id=device_id)


def fuzzy_equals(a, b):
    if isinstance(a, datetime) or isinstance(b, datetime):
        if not isinstance(a, datetime):