Code Example #1
def initialize_logging():
    kafkalogger = logging.getLogger("kafka")
    kafkalogger.addHandler(logging.StreamHandler(sys.stdout))
    kafkalogger.setLevel(os.getenv("LOG_LEVEL", "INFO"))

    if any("KUBERNETES" in k for k in os.environ):
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(LogstashFormatterV1())
        logging.root.setLevel(os.getenv("LOG_LEVEL", "INFO"))
        logging.root.addHandler(handler)
    else:
        logging.basicConfig(
            level="INFO", format="%(threadName)s %(levelname)s %(name)s - %(message)s"
        )

    logger = logging.getLogger(app_config.get_app_name())

    if config.aws_access_key_id and config.aws_secret_access_key:
        logger.warn("configuring cloudwatch logging")
        config_cloudwatch(logger)
        logger.warn("cloudwatch logging ENABLED")
    else:
        logger.warn("cloudwatch logging DISABLED")

    return logger
Code Example #2
def detectQR(image_name):
    host = '127.0.0.1'
    test_logger = logging.getLogger()
    handler = logging.StreamHandler()
    handler.setFormatter(LogstashFormatterV1())

    test_logger.setLevel(logging.INFO)
    test_logger.addHandler(handler)
    test_logger.addHandler(logstash.LogstashHandler(host, 5000, version=1))

    script_path = os.getcwd()
    detect_result = os.popen("java -jar QRDetect.jar %s" % image_name)
    csv = detect_result.read().split(',')
    timestamp = image_name.split('_')[1].split('.')[0]

    if len(csv) == 2:
        print("NO QR DETECTED")
    else:
        for i in range(0, int(len(csv) / 3)):
            data = {
                'cartID': int(csv[i + 1]),
                'camID': int(csv[0]),
                'x': int(csv[i + 2]),
                'y': int(csv[i + 3]),
                'time': int(timestamp),
            }
            test_logger.info('python-logstash: test extra fields', extra=data)
Code Example #3
def init_logger():
    logger = logging.getLogger()
    handler = logging.StreamHandler()
    formatter = LogstashFormatterV1()
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    return logger
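A minimal, self-contained sketch of how a logger wired this way might be exercised (the logger name and extra fields below are illustrative, not from the original project):

import logging
import sys

from logstash_formatter import LogstashFormatterV1

# Build a stdout logger that emits Logstash-v1 JSON documents.
logger = logging.getLogger("demo")
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(LogstashFormatterV1())
logger.addHandler(handler)
logger.setLevel(logging.INFO)

# Keys passed via `extra` are merged into the emitted JSON document.
logger.info("service started", extra={"service": "demo", "port": 8080})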
Code Example #4
def configure_logging(service_name):
    """
    Configure logging based on the settings in the settings file.
    This sets up a handler for each logging mode that is enabled.
    See `microservice.core.settings.LoggingMode` for the supported logging types.

    :param str service_name: Name of the service being served by this instance.
    """
    logger = logging.getLogger()
    logger.setLevel(settings.logging_level)

    formatter_kwargs = {
        'fmt': json.dumps({'extra': {
            'local_service': service_name,
            # Basic way to distinguish logs between instances of the same microservice.
            'instance_id': random.randint(100000, 999999)
        }})
    }

    formatter = LogstashFormatterV1(**formatter_kwargs)

    if settings.LoggingMode.FILE in settings.logging_modes:
        file_handler = logging.FileHandler('{}.log'.format(service_name))
        file_handler.setFormatter(formatter)
        file_handler.addFilter(RequestIDLogFilter())
        logger.addHandler(file_handler)

    if settings.LoggingMode.HUMAN in settings.logging_modes:
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setFormatter(HumanReadableLogstashFormatter(**formatter_kwargs))
        stdout_handler.addFilter(RequestIDLogFilter())
        logger.addHandler(stdout_handler)

    if settings.LoggingMode.STDOUT in settings.logging_modes:
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setFormatter(formatter)
        stdout_handler.addFilter(RequestIDLogFilter())
        logger.addHandler(stdout_handler)

    if settings.LoggingMode.LOGSTASH in settings.logging_modes:
        # TODO: test this
        raise Exception("Warning: untested")
        logstash_handler = AsynchronousLogstashHandler(
            **settings.logstash_settings)
        logstash_handler.setFormatter(formatter)
        logstash_handler.addFilter(RequestIDLogFilter())
        logger.addHandler(logstash_handler)

    if settings.LoggingMode.FLUENTD in settings.logging_modes:
        # TODO: test this
        raise Exception("Warning: untested")
        fluentd_handler = handler.FluentHandler(
            'pycroservices.follow',
            **settings.fluentd_settings,
            buffer_overflow_handler=overflow_handler)
        fluentd_handler.setFormatter(formatter)
        fluentd_handler.addFilter(RequestIDLogFilter())
        logger.addHandler(fluentd_handler)
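The pattern in configure_logging, one handler per enabled output mode, all sharing a formatter whose fmt embeds static extra fields, can be reduced to a small standalone sketch. This is only a sketch: it assumes nothing beyond the standard library and logstash_formatter, and the mode names are illustrative rather than the project's settings.LoggingMode values.

import json
import logging
import sys

from logstash_formatter import LogstashFormatterV1

def configure(service_name, modes=("STDOUT",)):
    """Attach one handler per enabled mode; every record carries static extras."""
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    formatter = LogstashFormatterV1(
        fmt=json.dumps({"extra": {"local_service": service_name}}))

    if "FILE" in modes:
        file_handler = logging.FileHandler("{}.log".format(service_name))
        file_handler.setFormatter(formatter)
        root.addHandler(file_handler)

    if "STDOUT" in modes:
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setFormatter(formatter)
        root.addHandler(stdout_handler)

    return root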
Code Example #5
def config_cloudwatch(logger):
    CW_SESSION = Session(aws_access_key_id=config.AWS_ACCESS_KEY_ID,
                         aws_secret_access_key=config.AWS_SECRET_ACCESS_KEY,
                         region_name=config.AWS_REGION_NAME)
    cw_handler = watchtower.CloudWatchLogHandler(boto3_session=CW_SESSION,
                                                 log_group=config.LOG_GROUP,
                                                 stream_name=config.HOSTNAME)
    cw_handler.setFormatter(LogstashFormatterV1())
    logger.addHandler(cw_handler)
Code Example #6
def initialize_logging():
    kafkalogger = logging.getLogger("kafka")
    kafkalogger.setLevel("ERROR")
    if any("OPENSHIFT" in k for k in os.environ):
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(LogstashFormatterV1())
        logging.root.setLevel(os.getenv("LOG_LEVEL", "INFO"))
        logging.root.addHandler(handler)
    else:
        logging.basicConfig(
            level=config.LOG_LEVEL,
            format="%(threadName)s %(levelname)s %(name)s - %(message)s",
        )

    if os.environ.get("ACG_CONFIG"):
        f = clowder_config
    else:
        f = non_clowder_config

    (aws_access_key_id, aws_secret_access_key, aws_region_name,
     aws_log_group, create_log_group) = f()

    if all((aws_access_key_id, aws_secret_access_key, aws_region_name,
            aws_log_group)):
        from boto3.session import Session
        import watchtower

        boto3_session = Session(aws_access_key_id=aws_access_key_id,
                                aws_secret_access_key=aws_secret_access_key,
                                region_name=aws_region_name)

        cw_handler = watchtower.CloudWatchLogHandler(
            boto3_session=boto3_session,
            log_group=aws_log_group,
            stream_name=socket.gethostname(),
            create_log_group=create_log_group)

        cw_handler.setFormatter(LogstashFormatterV1())
        logging.root.addHandler(cw_handler)

    logger = logging.getLogger(config.APP_NAME)

    return logger
Code Example #7
File: views_sub.py Project: kangbada0728/O4O_Cart
def calculate(things_to_buy_count, request_data, customer_id, coupons_list,
              final_payment_amount, nocoupon_payment_amount):
    host = '127.0.0.1'
    test_logger = logging.getLogger()
    handler = logging.StreamHandler()
    handler.setFormatter(LogstashFormatterV1())

    test_logger.setLevel(logging.INFO)
    test_logger.addHandler(handler)
    test_logger.addHandler(logstash.LogstashHandler(host, 5000, version=1))
    for i in range(things_to_buy_count):
        item_serial = str(request_data['serial' + str(i + 1)])
        try:
            print(item_serial)
            item_ob_info = Item_Info.objects.get(serial_num=item_serial)
            item_ob = item_ob_info.item
        except Item_Info.DoesNotExist:
            print('Invalid Item Serial\n')
            return HttpResponse('Invalid Item Serial\n')

        print(customer_id)

        logdata = {
            'serial': item_serial,
        }

        data = Pur_History(customer=customer_id, item=item_ob_info)
        print(data)
        data.save()
        test_logger.info('python-logstash: test extra fields', extra=logdata)
        coupon_use_checker = False
        for coupon in coupons_list:
            if coupon.coupon_item.item == item_ob and coupon.coupon_use is None:
                Coupon_Item_Info.objects.filter(
                    serial_num=coupon.serial_num).update(coupon_use=True)
                discounted_price = int(
                    float(item_ob.price) *
                    (1 - float(coupon.coupon_item.discount_rate) / 100))
                final_payment_amount += discounted_price
                print(discounted_price)
                print('coupon adjust')
                coupon_use_checker = True
                break
        if not coupon_use_checker:
            print(item_ob.price)
            print('coupon not adjust')
            final_payment_amount += item_ob.price
        nocoupon_payment_amount += item_ob.price

    return final_payment_amount
Code Example #8
def detectQR(image_name):
    host = '127.0.0.1'
    test_logger = logging.getLogger()
    handler = logging.StreamHandler()
    handler.setFormatter(LogstashFormatterV1())

    test_logger.setLevel(logging.INFO)
    test_logger.addHandler(handler)
    test_logger.addHandler(logstash.LogstashHandler(host, 5000, version=1))

    #image_name = '1527589403.6_1.jpg';
    script_path = os.getcwd()
    detect_result = os.popen("java -jar QRDetect.jar %s" % image_name)
    csv = detect_result.read().split(',')
    #timestamp = image_name.split('_')[1].split('.')[0]
    timestamp = image_name.split('_')[0]

    if len(csv) == 2:
        print("NO QR DETECTED")
        print(csv)
    else:
        print("QR DETECTED############################")
        print(csv)
        for i in range(0, int(len(csv) / 3)):
            logdata = {
                'cartID': int(csv[i + 1]),
                'camID': int(csv[0]),
                'x': int(csv[i + 2]),
                'y': int(csv[i + 3]),
                'time': int(timestamp),
            }
            #data_json = json.dumps(data, indent = 2)

            time_num = int(timestamp)
            #serial = 'cart128644'
            serial = str(int(csv[i + 1]))
            camera_num = int(csv[0])
            coor_x = int(csv[i + 2])
            coor_y = int(csv[i + 3])

            cart_customer = Cart_Info.objects.get(serial_num=serial).owner
            camera = Camera_Info.objects.get(num=camera_num)
            data = Mv_History(time=time_num,
                              customer=cart_customer,
                              camera_num=camera,
                              x=coor_x,
                              y=coor_y)
            data.save()
            #receive_cartqrcode(serial,camera_num,coor_x,coor_y)

            print("DB SAVED")
            test_logger.info('python-logstash: test extra fields',
                             extra=logdata)
            print("LOG SAVED")
Code Example #9
def config_cloudwatch(logger):
    CW_SESSION = Session(
        aws_access_key_id=config.aws_access_key_id,
        aws_secret_access_key=config.aws_secret_access_key,
        region_name=config.aws_region_name,
    )
    cw_handler = watchtower.CloudWatchLogHandler(
        boto3_session=CW_SESSION,
        log_group=config.log_group,
        stream_name=config.namespace,
    )
    cw_handler.setFormatter(LogstashFormatterV1())
    logger.addHandler(cw_handler)
Code Example #10
def create_logger() -> logging.Logger:
    """Set up logstash formatted logger.

    Returns:
        logging.Logger: Python logger for __name__

    """
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler(stream=sys.stdout)
    handler.setFormatter(LogstashFormatterV1())
    logger.setLevel(logging.INFO)
    logger.handlers = [handler]

    return logger
Code Example #11
def initialize_logging():
    kafkalogger = logging.getLogger("kafka")
    kafkalogger.setLevel("ERROR")
    if any("KUBERNETES" in k for k in os.environ):
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(LogstashFormatterV1())
        logging.root.setLevel(os.getenv("LOG_LEVEL", "INFO"))
        logging.root.addHandler(handler)
    else:
        logging.basicConfig(
            level=config.LOG_LEVEL,
            format="%(threadName)s %(levelname)s %(name)s - %(message)s")

    logger = logging.getLogger(config.APP_NAME)

    if config.AWS_ACCESS_KEY_ID and config.AWS_SECRET_ACCESS_KEY:
        config_cloudwatch(logger)

    return logger
Code Example #12
def init(logstash_type,
         level=logging.DEBUG,
         logpath=None,
         logger_name=None,
         extra_fields=None):
    """
    Initializes and returns configured LogstashFormatter logger

    :param logstash_type: required extra field used for logstash configuration to Kafka topic output
    :type logstash_type: str
    :param level: defaulted to DEBUG
    :type level: int
    :param logpath: optional, filepath that the log will be written to, prints to stdout if logpath=None
    :type logpath: str
    :param logger_name: name of logger to be configured
    :type logger_name: str
    :param extra_fields: provide extra fields to be always present in logs
    :type extra_fields: dict
    """
    if logpath:
        handler = logging.FileHandler(logpath)
    else:
        handler = logging.StreamHandler(sys.stdout)

    # since we require that type is declared, it is always defined as an extra field
    fmt = {'extra': {'type': logstash_type}}

    # if additional extra fields are defined they are appended
    if extra_fields:
        if not isinstance(extra_fields, dict):
            raise Exception('extra_fields must be of type dict')
        fmt['extra'].update(extra_fields)

    handler.setFormatter(LogstashFormatterV1(fmt=json.dumps(fmt)))
    handler.setLevel(level)

    logger = logging.getLogger(logger_name)
    logger.addHandler(handler)
    logger.setLevel(level)

    return logger
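A hedged usage sketch for init above; the type string, logger name, and extra fields are made-up values for illustration, and it assumes the same imports the function already relies on (logging, sys, json, LogstashFormatterV1):

# Hypothetical call: JSON records go to stdout, each tagged with a static
# "type" and "env" field; per-call values are added via `extra=`.
log = init("payments-service",
           level=logging.INFO,
           logger_name="payments",
           extra_fields={"env": "staging"})
log.info("charge processed", extra={"order_id": 42})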
Code Example #13
File: service.py Project: Wenbo16/Tap-News
import sys
import json
import logging
import pyjsonrpc
from logstash_formatter import LogstashFormatterV1

with open('/home/wenbo/Desktop/Tap-News/config/config.json') as config_file:
    config = json.load(config_file)

SERVER_HOST = config['backend_server']['service']['SERVER_HOST']
SERVER_PORT = config['backend_server']['service']['SERVER_PORT']

# Initialize logger
logger = logging.getLogger('backend-server')
logger.setLevel(logging.INFO)
logFile = logging.FileHandler('main.log')
formatter = LogstashFormatterV1()
logFile.setFormatter(formatter)
logger.addHandler(logFile)


class RequestHandler(pyjsonrpc.HttpRequestHandler):

    @pyjsonrpc.rpcmethod
    def add(self, a, b):
        """Test method."""
        print("add is called with %d and %d" % (a, b))
        return a + b

    @pyjsonrpc.rpcmethod
    def getNewsSummariesForUser(self, user_id, page_num):
        """Get news summaries for a user."""
        logger.info('get news summaries for user')
Code Example #14
File: pup.py Project: jdobes/insights-pup
import os
import sys
import logging

from logstash_formatter import LogstashFormatterV1
from concurrent.futures import ThreadPoolExecutor
from aiokafka import AIOKafkaConsumer, AIOKafkaProducer
from kafka.errors import KafkaError
from kafkahelpers import ReconnectingClient
from prometheus_async.aio import time
from boto3.session import Session

from pup.utils import mnm, configuration
from pup.utils.fact_extract import extract_facts
from pup.utils.get_commit_date import get_commit_date

# Logging
if any("KUBERNETES" in k for k in os.environ):
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(LogstashFormatterV1())
    logging.root.setLevel(os.getenv("LOGLEVEL", "INFO"))
    logging.root.addHandler(handler)
else:
    logging.basicConfig(
        level=os.getenv("LOGLEVEL", "INFO"),
        format="%(threadName)s %(levelname)s %(name)s - %(message)s")

logger = logging.getLogger('advisor-pup')
try:
    with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace',
              'r') as f:
        NAMESPACE = f.read()
except EnvironmentError:
    logger.info('Not Running on Openshift')
Code Example #15
# place. I kept this for reference, but I prefer the refactored code in
# logsetup which makes it more readable and re-usable.
import logging
from logging import StreamHandler
from logstash_formatter import LogstashFormatterV1
from logstash_async.handler import AsynchronousLogstashHandler
import sys
import os

# most of this is from the python logstash example:
# https://pypi.python.org/pypi/python-logstash

host = os.getenv('LOGSTASH_HOST', 'localhost')
port = int(os.getenv('LOGSTASH_PORT', 5959))

formatter = LogstashFormatterV1(fmt='{"extra": {"appname": "lstest"}}')

handler = AsynchronousLogstashHandler(host, port, database_path='logstash.db')
handler.setFormatter(formatter)

test_logger = logging.getLogger('python-logstash-logger')
test_logger.setLevel(logging.INFO)
test_logger.addHandler(handler)
test_logger.addHandler(StreamHandler(stream=sys.stdout))


# this part is from SO:
# https://stackoverflow.com/questions/6234405/logging-uncaught-exceptions-in-python
def handle_exception(exc_type, exc_value, exc_traceback):
    if issubclass(exc_type, KeyboardInterrupt):
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
        return
    # Log anything that is not a KeyboardInterrupt (pattern from the linked SO answer).
    test_logger.error("Uncaught exception",
                      exc_info=(exc_type, exc_value, exc_traceback))
Code Example #16
def main():
    signal.signal(signal.SIGTERM, signal_handler)

    parser = argparse.ArgumentParser(
        description='Export ES query results to Prometheus.')
    parser.add_argument(
        '-e',
        '--es-cluster',
        default='localhost',
        help=
        'addresses of nodes in a Elasticsearch cluster to run queries on. Nodes should be separated by commas e.g. es1,es2. Ports can be provided if non-standard (9200) e.g. es1:9999 (default: localhost)'
    )
    parser.add_argument(
        '-p',
        '--port',
        type=int,
        default=8080,
        help='port to serve the metrics endpoint on. (default: 8080)')
    parser.add_argument(
        '--query-disable',
        action='store_true',
        help=
        'disable query monitoring. Config file does not need to be present if query monitoring is disabled.'
    )
    parser.add_argument(
        '-c',
        '--config-file',
        default='exporter.cfg',
        help=
        'path to query config file. Can be absolute, or relative to the current working directory. (default: exporter.cfg)'
    )
    parser.add_argument('--cluster-health-disable',
                        action='store_true',
                        help='disable cluster health monitoring.')
    parser.add_argument(
        '--cluster-health-interval',
        type=float,
        default=10,
        help=
        'polling interval for cluster health monitoring in seconds. (default: 10)'
    )
    parser.add_argument(
        '--cluster-health-level',
        default='indices',
        choices=['cluster', 'indices', 'shards'],
        help=
        'level of detail for cluster health monitoring.  (default: indices)')
    parser.add_argument('--nodes-stats-disable',
                        action='store_true',
                        help='disable nodes stats monitoring.')
    parser.add_argument(
        '--nodes-stats-interval',
        type=float,
        default=10,
        help=
        'polling interval for nodes stats monitoring in seconds. (default: 10)'
    )
    parser.add_argument('--indices-stats-disable',
                        action='store_true',
                        help='disable indices stats monitoring.')
    parser.add_argument(
        '--indices-stats-interval',
        type=float,
        default=10,
        help=
        'polling interval for indices stats monitoring in seconds. (default: 10)'
    )
    parser.add_argument(
        '--indices-stats-mode',
        default='cluster',
        choices=['cluster', 'indices'],
        help='detail mode for indices stats monitoring.  (default: cluster)')
    parser.add_argument('-j',
                        '--json-logging',
                        action='store_true',
                        help='turn on json logging.')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='turn on verbose logging.')
    args = parser.parse_args()

    log_handler = logging.StreamHandler()
    log_format = '[%(asctime)s] %(name)s.%(levelname)s %(threadName)s %(message)s'
    formatter = (LogstashFormatterV1() if args.json_logging
                 else logging.Formatter(log_format))
    log_handler.setFormatter(formatter)

    logging.basicConfig(handlers=[log_handler],
                        level=logging.DEBUG if args.verbose else logging.INFO)
    logging.captureWarnings(True)

    port = args.port
    es_cluster = args.es_cluster.split(',')
    es_client = Elasticsearch(es_cluster, verify_certs=False)

    scheduler = sched.scheduler()

    if not args.query_disable:
        config = configparser.ConfigParser()
        config.read_file(open(args.config_file))

        query_prefix = 'query_'
        queries = {}
        for section in config.sections():
            if section.startswith(query_prefix):
                query_name = section[len(query_prefix):]
                query_interval = config.getfloat(section, 'QueryIntervalSecs')
                query_indices = config.get(section,
                                           'QueryIndices',
                                           fallback='_all')
                query = json.loads(config.get(section, 'QueryJson'))

                queries[query_name] = (query_interval, query_indices, query)

        if queries:
            for name, (interval, indices, query) in queries.items():
                func = partial(run_query, es_client, name, indices, query)
                run_scheduler(scheduler, interval, func)
        else:
            logging.warning('No queries found in config file %s',
                            args.config_file)

    if not args.cluster_health_disable:
        cluster_health_func = partial(get_cluster_health, es_client,
                                      args.cluster_health_level)
        run_scheduler(scheduler, args.cluster_health_interval,
                      cluster_health_func)

    if not args.nodes_stats_disable:
        nodes_stats_func = partial(get_nodes_stats, es_client)
        run_scheduler(scheduler, args.nodes_stats_interval, nodes_stats_func)

    if not args.indices_stats_disable:
        parse_indices = args.indices_stats_mode == 'indices'
        indices_stats_func = partial(get_indices_stats, es_client,
                                     parse_indices)
        run_scheduler(scheduler, args.indices_stats_interval,
                      indices_stats_func)

    logging.info('Starting server...')
    start_http_server(port)
    logging.info('Server started on port %s', port)

    try:
        scheduler.run()
    except KeyboardInterrupt:
        pass

    shutdown()
Code Example #17
                  defaults={
                      'count': settings.LOG_BACKUP_COUNT,
                      'db_dump': settings.DB_BACKUP_FILE,
                      'db_setup': settings.DB_BACKUP_FILE + '.db_setup',
                      'log_file': join(LOG_DIR, 'bank.log'),
                      'when': settings.LOG_ROTATE_WHEN,
                  })

if settings.LOG_SQL_STATEMENTS:
    logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)

# TODO Explore and add Tornado logs to the log file as well

logger = logging.getLogger('app')

handler = logging.FileHandler(join(LOG_DIR, 'bank.log.json'))
formatter = LogstashFormatterV1(fmt=json.dumps(
    {'extra': {
        'app': settings.APP_NAME,
        'env': settings.ENVIRONMENT
    }}))

handler.setFormatter(formatter)
logger.addHandler(handler)

# db dump logger
db_dump_logger = logging.getLogger('dbdump')

# db setup logger
db_logger = logging.getLogger('dbsetupdump')
Code Example #18
def get_formatter(extra=None):
    # Avoid a mutable default argument; fall back to an empty extras dict.
    return LogstashFormatterV1(fmt=json.dumps({"extra": extra or {}}))
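A short usage sketch for the helper above; the field names are illustrative, and it assumes the same json / LogstashFormatterV1 imports as the snippet:

import logging
import sys

# Pin static fields onto every record emitted through this handler.
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(get_formatter({"app": "example-app", "env": "dev"}))

log = logging.getLogger("example")
log.addHandler(handler)
log.warning("hello")  # emitted JSON includes the "app" and "env" fields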
Code Example #19
    logger = logging.getLogger('orator.connection.queries')
    logger.setLevel(logging.DEBUG)
    logging.warning('Orator query log started')

# Log
handlers = [
    logging.handlers.RotatingFileHandler(
        "%s/%s" % (app.root_path, app.config['LOG_RESOURCE_PATH']),
        encoding=app.config['BLUEPRINT']['LOGS']['FILE_ENCODING'],
        maxBytes=int(app.config['BLUEPRINT']['LOGS']['FILE_MAX_BYTES']),
        backupCount=int(app.config['BLUEPRINT']['LOGS']['FILE_BACKUP_COUNT'])),
    logging.StreamHandler()
]
fmt = {'extra': {'type': app.config['APP_NAME'] + '-local'}}
jfmt = json.dumps(fmt)
logsts_formatter = LogstashFormatterV1(fmt=jfmt, datefmt="%Y-%m-%d %H:%M:%S")
app.logger.setLevel(logging.DEBUG)
for h in handlers:
    h.setFormatter(logsts_formatter)
    app.logger.addHandler(h)


# Exception Handler
# Invalid response
@app.errorhandler(ConnectionTimeoutException)
def handle_exception(error):
    data = error.get_body()

    logRequest(data, error.get_code())

    return jsonify(data), error.get_code()
Code Example #20
File: app.py Project: CoreSoft2/adjure
def setup_logging(app, config):
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(LogstashFormatterV1())
    app.logger.addHandler(handler)