def test_logger_when_config_file_exist(self):
    '''
    When the config file is present, we expect a logger
    built from the specified configuration.
    '''
    prod_logger = get_logger(logger_type='prod')
    datadog_logger = get_logger(logger_type='datadog')
    self.assertEqual(prod_logger.logger.name, 'prod')
    self.assertEqual(prod_logger.logger.level, 20)  # 20 == logging.INFO
    self.assertEqual(datadog_logger.logger.level, 20)
    self.assertEqual(datadog_logger.logger.name, 'datadog')
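For context, the assertions above imply a logging configuration that defines 'prod' and 'datadog' loggers at level 20 (logging.INFO). A minimal sketch of such a configuration, assuming a dictConfig-style setup (the actual config file and its format are not shown in the source):

import logging.config

# Hypothetical configuration consistent with the test above; the real project
# may load an equivalent structure from a config file instead.
LOGGING_CONFIG = {
    'version': 1,
    'disable_existing_loggers': False,
    'loggers': {
        'prod': {'level': 'INFO'},     # logging.INFO == 20
        'datadog': {'level': 'INFO'},  # logging.INFO == 20
    },
}

logging.config.dictConfig(LOGGING_CONFIG)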
Example n. 2
def dynamo_to_es(event, context):
    """
    Lambda entry point: forward a DynamoDB Streams event to Elasticsearch.
    :param event: dict
    :param context: dict
    :return: None
    """
    try:
        return DynamoToEsService(event=event,
                                 es_endpoint=os.environ["ES_ENDPOINT"],
                                 region=os.environ["REGION"]).execute()
    except Exception as ex:
        # Log the full traceback; the error is swallowed rather than re-raised.
        get_logger().error(ex, exc_info=True)
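For reference, a skeletal DynamoDB Streams event of the kind this handler receives; the field names follow the AWS event structure (they are also the ones consumed by execute() in Example n. 10 below), while the concrete values are illustrative:

# Illustrative DynamoDB Streams event (values are made up).
sample_event = {
    'Records': [{
        'eventSource': 'aws:dynamodb',
        'eventName': 'INSERT',
        'eventSourceARN': 'arn:aws:dynamodb:us-east-1:123456789012:table/my-table/stream/2020-01-01T00:00:00.000',
        'dynamodb': {
            'Keys': {'id': {'S': 'abc-123'}},
            'NewImage': {'id': {'S': 'abc-123'}, 'name': {'S': 'example'}},
            'SequenceNumber': '111',
        },
    }]
}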
Example n. 3

def lambda_wrapper(lambda_func):
    # Enclosing decorator reconstructed: the source shows only the inner
    # _wrapper, which closes over lambda_func; the name lambda_wrapper is assumed.
    def _wrapper(event, context):
        if event and 'requestContext' in event:
            request_context = event.get('requestContext')
            if 'requestId' in request_context:
                src.common.context.REQUEST_ID = request_context.get('requestId')
            if os.environ.get('STAGE') != 'prod' and 'authorizer' in request_context:
                get_logger().debug("Lambda event: %s", request_context.get('authorizer'))

        try:
            return lambda_func(event, context)
        except Exception:
            get_logger().error("Error within lambda function.", exc_info=True)
            raise

    return _wrapper
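A minimal usage sketch, assuming the enclosing decorator is exposed as lambda_wrapper (the name is inferred above; only _wrapper appears in the source):

@lambda_wrapper
def handler(event, context):
    # Business logic runs with REQUEST_ID already captured and any
    # unhandled exception logged before being re-raised.
    return {'statusCode': 200}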
Example n. 4

    def test_get_logger(self, mock_logging):
        # given
        src.common.context.REQUEST_ID = "12345"

        mock_logger = MagicMock()
        mock_logging.getLogger = MagicMock(return_value=mock_logger)

        # when
        logger = get_logger('my stage')

        # then
        self.assertEqual(mock_logging.getLogger.mock_calls, [
            call('my stage'),
        ])
        self.assertEqual(mock_logging.LoggerAdapter.mock_calls,
                         [call(mock_logger, {'request_id': '12345'})])
        self.assertIsNotNone(logger)
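Taken together, the logger tests above pin down the shape of get_logger: resolve the named logger via logging.getLogger and wrap it in a LoggerAdapter carrying the current request id. A minimal sketch consistent with those expectations (the default logger name is an assumption):

import logging

import src.common.context


def get_logger(logger_type='prod'):
    # Level and handlers come from the previously loaded logging config.
    logger = logging.getLogger(logger_type)
    # Attach the request id so every record carries it as `extra`.
    return logging.LoggerAdapter(logger, {'request_id': src.common.context.REQUEST_ID})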
Example n. 5
    def post_to_es(self, payload):
        """
        High-level POST of data to Amazon Elasticsearch Service, with
        exponential backoff according to the suggested algorithm:
        http://docs.aws.amazon.com/general/latest/gr/api-retries.html
        :param payload: bulk request body (newline-delimited JSON)
        :return: None
        """

        # Get aws_region and credentials to post signed URL to ES
        es_region = self.es_region
        session = Session({'region': es_region})
        creds = get_credentials(session)
        es_url = urlparse.urlparse(self.es_endpoint)
        es_endpoint = es_url.netloc or es_url.path  # Extract the domain name in ES_ENDPOINT

        # Post data with exponential backoff
        retries = 0
        while retries < self.es_max_retries:
            if retries > 0:
                seconds = (2 ** retries) * 0.1  # 0.2s, 0.4s, 0.8s, ...
                time.sleep(seconds)

            try:
                es_ret_str = self.post_data_to_es(payload, es_region, creds,
                                                  es_endpoint, '/_bulk')
                es_ret = json.loads(es_ret_str)

                if es_ret['errors']:
                    get_logger().error(
                        'ES post unsuccessful, errors present, took=%sms',
                        es_ret['took'])
                    # Filter errors
                    es_errors = [
                        item for item in es_ret['items']
                        if item.get('index', {}).get('error')
                    ]
                    get_logger().error('List of items with errors: %s',
                                       json.dumps(es_errors))
                else:
                    get_logger().info('ES post successful, took=%sms',
                                      es_ret['took'])
                break  # Sending to ES was ok, break retry loop
            except ESException as e:
                if 500 <= e.status_code <= 599:
                    retries += 1  # Server error: candidate for retry
                else:
                    raise  # Stop retrying, re-raise exception
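As a sanity check on the backoff schedule, the sleep before retry n is (2 ** n) * 0.1 seconds, so the delays grow as 0.2 s, 0.4 s, 0.8 s, and so on:

# Delay before each retry attempt (retries = 1, 2, 3, ...).
delays = [(2 ** r) * 0.1 for r in range(1, 6)]
print(delays)  # [0.2, 0.4, 0.8, 1.6, 3.2]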
Example n. 6

    def test_logger_format_for_datadog(self):
        '''
        Test that the datadog logger writes messages in the expected format.
        '''
        datadog_logger = get_logger(logger_type='datadog')
        with LogCapture('datadog') as log_capture:
            datadog_logger.extra = {
                **datadog_logger.extra,
                'metric_value': '1',
                'metric_type': 'count',
                'metric_name': 'page_view',
                'tag_value': '#function:get_id',
            }
            datadog_logger.info("")

            self.assertEqual(len(log_capture.records), 1)
            record = log_capture.records[-1]
            self.assertEqual(record.getMessage(), '')
            self.assertEqual(record.metric_value, '1')
            self.assertEqual(record.metric_type, 'count')
            self.assertEqual(record.metric_name, 'page_view')
            self.assertEqual(record.tag_value, '#function:get_id')
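The assertions above only verify that the extra fields land on the LogRecord. A formatter that would render them as a Datadog metric-over-logs line is sketched below; the MONITORING|... layout follows Datadog's documented log-based metric convention, but this formatter class itself is an assumption, not shown in the source:

import logging
import time


class DatadogMetricFormatter(logging.Formatter):
    # Hypothetical formatter emitting Datadog-style metric lines:
    # MONITORING|<unix_ts>|<value>|<type>|<name>|<tags>
    def format(self, record):
        return 'MONITORING|%d|%s|%s|%s|%s' % (
            int(time.time()),
            record.metric_value,  # e.g. '1'
            record.metric_type,   # e.g. 'count'
            record.metric_name,   # e.g. 'page_view'
            record.tag_value,     # e.g. '#function:get_id'
        )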
Example n. 7
#!/usr/bin/env python
# coding:utf-8

from flask import Flask
from flask import g
from flask import request as r
from werkzeug.utils import find_modules, import_string
from flask_cors import CORS
import sqlite3
from src.common import log
import traceback
from flask import jsonify
from src.common.errors import APIBaseException

logger = log.get_logger("app")


def create_app():
    app = Flask(__name__)
    # Handle cross-origin (CORS) requests
    CORS(app)
    # Register the URL handlers (blueprints)
    register_blueprints(app)

    reg_before_request(app)
    reg_after_request(app)
    reg_exception_handler(app)
    reg_teardowns(app)
    init_sqlite(app)

    return app
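create_app calls several helpers that are not shown. Given the find_modules/import_string imports above, register_blueprints most likely follows the canonical Flask auto-discovery pattern; a sketch under that assumption (the 'src.views' package path and the `bp` attribute name are made up):

def register_blueprints(app):
    # Discover every module under the views package and register any
    # blueprint object it exposes.
    for name in find_modules('src.views'):  # hypothetical package path
        module = import_string(name)
        if hasattr(module, 'bp'):
            app.register_blueprint(module.bp)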
Example n. 8
    @classmethod
    def handle_exception(cls, exception):
        # Log the full traceback at warning level instead of letting it propagate.
        get_logger().warning(exception, exc_info=True)
Example n. 9
# -*- coding: utf8 -*-
import argparse
import os
import random

import py_eureka_client.eureka_client as eureka_client

import src.common.log as log_util
from src import __version__

abspath = os.path.dirname(__file__)

log = log_util.get_logger()


def get_config(section='eureka', option='urls'):
    return log_util.get_config(section, option)


def get_server_url(eureka_server="http://172.16.50.2:12386/eureka/",
                   app_name="ALGO-SERVER-MASTER"):
    application = {}
    untry_servers = eureka_server.split(",")
    tried_servers = []
    ok = False
    while len(untry_servers) > 0:
        url = untry_servers[0].strip()
        try:
            application = eureka_client.get_application(url, app_name=app_name)
            ok = True
            break  # Successfully reached a Eureka server
        except Exception:
            log.info("Eureka server [%s] is down, trying the next URL.", url)
            # Move the failed server to the tried list before continuing.
            tried_servers.append(untry_servers.pop(0))
    # ... (remainder of the function is not shown in the source)
Example n. 10
    def execute(self):
        records = self.event['Records']
        now = datetime.datetime.utcnow()

        ddb_deserializer = StreamTypeDeserializer()
        es_actions = []  # Items to be added/updated/removed from ES - for bulk API
        cnt_insert = cnt_modify = cnt_remove = 0
        for record in records:
            # Handle both native DynamoDB Streams and stream data replayed through Kinesis (for manual replay)
            if record.get('eventSource') == 'aws:dynamodb':
                ddb = record['dynamodb']
                ddb_table_name = self.get_table_name_from_arn(
                    record['eventSourceARN'])
                doc_seq = ddb['SequenceNumber']
            elif record.get('eventSource') == 'aws:kinesis':
                ddb = json.loads(base64.b64decode(record['kinesis']['data']))
                ddb_table_name = ddb['SourceTable']
                doc_seq = record['kinesis']['sequenceNumber']
            else:
                get_logger().error('Ignoring non-DynamoDB event sources: %s',
                                   record.get('eventSource'))
                continue

            # Compute DynamoDB table, type and index for item
            doc_table = self.doc_table_format.format(
                ddb_table_name.lower())  # Table formatter (assumed attribute;
                                         # the source reused doc_type_format here)
            doc_type = self.doc_type_format.format(
                ddb_table_name.lower())  # Type formatter
            doc_index = self.compute_doc_index(ddb['Keys'], ddb_deserializer)

            # Dispatch according to event TYPE
            event_name = record['eventName'].upper()  # INSERT, MODIFY, REMOVE

            # Treat events from a Kinesis stream as INSERTs
            if event_name == 'AWS:KINESIS:RECORD':
                event_name = 'INSERT'

            # Update counters
            if event_name == 'INSERT':
                cnt_insert += 1
            elif event_name == 'MODIFY':
                cnt_modify += 1
            elif event_name == 'REMOVE':
                cnt_remove += 1
            else:
                get_logger().warning('Unsupported event_name: %s', event_name)

            # If DynamoDB INSERT or MODIFY, send 'index' to ES
            if (event_name == 'INSERT') or (event_name == 'MODIFY'):
                if 'NewImage' not in ddb:
                    get_logger().warning(
                        'Cannot process stream if it does not contain NewImage'
                    )
                    continue
                # Deserialize DynamoDB type to Python types
                doc_fields = ddb_deserializer.deserialize(
                    {'M': ddb['NewImage']})
                # Add metadata
                doc_fields['@timestamp'] = now.isoformat()
                doc_fields['@SequenceNumber'] = doc_seq

                # Generate JSON payload
                doc_json = json.dumps(doc_fields)

                # Generate ES payload for item
                action = {
                    'index': {
                        '_index': doc_table,
                        '_type': doc_type,
                        '_id': doc_index
                    }
                }
                es_actions.append(
                    json.dumps(action))  # Action line with 'index' directive
                es_actions.append(doc_json)  # Payload line

            # If DynamoDB REMOVE, send 'delete' to ES
            elif event_name == 'REMOVE':
                action = {
                    'delete': {
                        '_index': doc_table,
                        '_type': doc_type,
                        '_id': doc_index
                    }
                }
                es_actions.append(json.dumps(action))

        # Prepare bulk payload
        es_actions.append('')  # Add one empty line to force final \n
        es_payload = '\n'.join(es_actions)

        self.post_to_es(es_payload)  # Post to ES with exponential backoff
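For clarity, the resulting es_payload is newline-delimited JSON in the Elasticsearch bulk format: one action line per operation, followed (for index actions) by the document line, with a trailing newline. An illustrative payload for one INSERT and one REMOVE:

# What es_payload might look like (values are illustrative).
es_payload = '\n'.join([
    '{"index": {"_index": "my-table", "_type": "my-table", "_id": "abc-123"}}',
    '{"id": "abc-123", "name": "example", "@timestamp": "2020-01-01T00:00:00", "@SequenceNumber": "111"}',
    '{"delete": {"_index": "my-table", "_type": "my-table", "_id": "def-456"}}',
    '',  # empty entry forces the final newline required by the bulk API
])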