def main():
    """Relay daemon: pull framed log messages from a local IPC socket,
    forward INFO-and-above to Logentries, and republish everything."""
    # setup logentries. we forward log messages to it
    le_handler = LogentriesHandler("e8549616-0798-4d7e-a2ca-2513ae81fa17",
                                   use_tls=False, verbose=False)
    min_level = 20  # logging.INFO

    pull_sock = zmq.Context().instance().socket(zmq.PULL)
    pull_sock.bind("ipc:///tmp/logmessage")

    # and we publish them
    pub_sock = messaging.pub_sock('logMessage')

    while True:
        payload = b''.join(pull_sock.recv_multipart()).decode('utf8')
        # first character encodes the numeric log level; the rest is the message
        levelnum = ord(payload[0])
        payload = payload[1:]

        if levelnum >= min_level:
            # push to logentries
            # TODO: push to athena instead
            le_handler.emit_raw(payload)

        # then we publish them
        msg = messaging.new_message()
        msg.logMessage = payload
        pub_sock.send(msg.to_bytes())
def main(gctx):
    """Older (Python 2 era) variant of the log relay: pull from IPC,
    ship INFO-and-above to Logentries, republish on the logMessage port."""
    # setup logentries. we forward log messages to it
    le_handler = LogentriesHandler("bc65354a-b887-4ef4-8525-15dd51230e8c",
                                   use_tls=False)
    min_level = 20  # logging.INFO

    zmq_ctx = zmq.Context()
    pull_sock = zmq_ctx.socket(zmq.PULL)
    pull_sock.bind("ipc:///tmp/logmessage")

    # and we publish them
    pub_sock = messaging.pub_sock(zmq_ctx, service_list['logMessage'].port)

    while True:
        payload = ''.join(pull_sock.recv_multipart())
        # print "RECV", repr(payload)
        # first character encodes the numeric log level; the rest is the message
        levelnum = ord(payload[0])
        payload = payload[1:]

        if levelnum >= min_level:
            # push to logentries
            le_handler.emit_raw(payload)

        # then we publish them
        msg = messaging.new_message()
        msg.logMessage = payload
        pub_sock.send(msg.to_bytes())
def get_logentries_handler():
    """Build an INFO-level Logentries handler using the module-level formatter.

    The token is read from the LOGENTRIES_TOKEN environment variable.
    """
    import os
    from logentries import LogentriesHandler

    le_handler = LogentriesHandler(os.getenv("LOGENTRIES_TOKEN"))
    le_handler.setLevel(logging.INFO)
    le_handler.setFormatter(formatter)  # `formatter` is defined at module scope
    return le_handler
class LogentriesExtension(object):
    """Scrapy extension that attaches a Logentries handler to the root logger.

    When running on Scrapy Cloud (SCRAPY_JOB_ID set), records are enriched
    with project/spider/job ids via ScrapingHubFilter and a matching format.
    """

    def __init__(self, token):
        self.token = token
        root = logging.getLogger()
        self.handler = LogentriesHandler(token)
        spider_id = os.environ.get('SCRAPY_SPIDER_ID')
        project_id = os.environ.get('SCRAPY_PROJECT_ID')
        job_id = os.environ.get('SCRAPY_JOB_ID')
        formatted = False
        if job_id is not None:
            formatted = True
            # Renamed from `filter`/`format`, which shadowed the builtins.
            sh_filter = ScrapingHubFilter({
                'project_id': project_id,
                'spider_id': spider_id,
                'job_id': job_id,
            })
            sh_format = ("%(name)s - %(levelname)s - "
                         "[project_id=%(project_id)s spider_id=%(spider_id)s "
                         "job_id=%(job_id)s] %(message)s")
            formatter = logging.Formatter(sh_format)
            self.handler.addFilter(sh_filter)
            self.handler.setFormatter(formatter)
        root.addHandler(self.handler)
        # NCA: not sure we want sensitive information like the token in the logs
        # Maybe use debug log level instead
        if formatted:
            logger.info(
                'Logentries activated with token {} and custom SH format'.
                format(token))
        else:
            logger.info(
                'Logentries activated with token {} and no custom SH format'.
                format(token))

    @classmethod
    def from_crawler(cls, crawler):
        """Standard Scrapy extension factory.

        Raises NotConfigured when LOGENTRIES_TOKEN is absent so the
        extension is silently disabled.
        """
        # first check if the extension should be enabled and raise
        # NotConfigured otherwise
        token = crawler.settings.get('LOGENTRIES_TOKEN')
        if not token:
            raise NotConfigured
        # instantiate and return the extension object
        return cls(token)
class LogentriesExtension(object):
    """Scrapy extension: route all root-logger records to Logentries.

    On Scrapy Cloud (SCRAPY_JOB_ID present), a ScrapingHubFilter plus a
    custom format add project/spider/job ids to each record.
    """

    def __init__(self, token):
        self.token = token
        root = logging.getLogger()
        self.handler = LogentriesHandler(token)
        spider_id = os.environ.get('SCRAPY_SPIDER_ID')
        project_id = os.environ.get('SCRAPY_PROJECT_ID')
        job_id = os.environ.get('SCRAPY_JOB_ID')
        formatted = False
        if job_id is not None:
            formatted = True
            # `sh_filter`/`sh_format` replace the original locals `filter`
            # and `format`, which shadowed the Python builtins.
            sh_filter = ScrapingHubFilter({
                'project_id': project_id,
                'spider_id': spider_id,
                'job_id': job_id,
            })
            sh_format = ("%(name)s - %(levelname)s - "
                         "[project_id=%(project_id)s spider_id=%(spider_id)s "
                         "job_id=%(job_id)s] %(message)s")
            formatter = logging.Formatter(sh_format)
            self.handler.addFilter(sh_filter)
            self.handler.setFormatter(formatter)
        root.addHandler(self.handler)
        # NCA: not sure we want sensitive information like the token in the logs
        # Maybe use debug log level instead
        if formatted:
            logger.info('Logentries activated with token {} and custom SH format'.format(token))
        else:
            logger.info('Logentries activated with token {} and no custom SH format'.format(token))

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy factory hook; disabled (NotConfigured) without a token."""
        # first check if the extension should be enabled and raise
        # NotConfigured otherwise
        token = crawler.settings.get('LOGENTRIES_TOKEN')
        if not token:
            raise NotConfigured
        # instantiate and return the extension object
        return cls(token)
def __init__(self, token):
    """Store the token, reset the running count/sum, and wire the module
    logger to Logentries."""
    self.token = token
    self._count = 0.0
    self._sum = 0.0
    self._lock = Lock()
    log.addHandler(LogentriesHandler(token))
def log_to_logentries(log_statement, log_level, logentries_token, device, remote_logging_id):
    """ Function that logs information to Logentries.

    Args:
        log_statement (str): The message to log.
        log_level (int): The level on which to log.
        logentries_token (str): The token of the logset in Logentries.
        device (Device): The device for which we want to log to Logentries.
        remote_logging_id (str): The remote logging id of the device.
    """
    # Handlers are cached per token in the module-level dict so repeated
    # calls reuse a single connection per logset.
    if logentries_token not in LOGENTRIES_HANDLERS:
        LOGENTRIES_HANDLERS[logentries_token] = LogentriesHandler(
            logentries_token)
    if LOGENTRIES_HANDLERS[logentries_token].good_config:
        logentries_logger = logging.getLogger('logentries')
        # Replace (not append) the handler list so this logger only ever
        # targets the current token's handler.
        logentries_logger.handlers = [LOGENTRIES_HANDLERS[logentries_token]]
        logentries_logger.log(
            log_level,
            '{0} - middleware - {1}'.format(remote_logging_id, log_statement))
    else:
        # Token failed validation: evict it from the cache and report the
        # problem through the regular Django logger instead.
        LOGENTRIES_HANDLERS.pop(logentries_token)
        log_statement = 'The logentries token is invalid - {0}'.format(
            device.app.app_id)
        django_logger.log(
            log_level,
            '{0} - middleware - {1}'.format(remote_logging_id, log_statement))
def configure_logging(app):
    """Attach a rotating file handler and (on WEB boxes) a Logentries
    handler to the Flask app logger.

    Does nothing in debug/testing mode, where stdout is sufficient.
    """
    if app.debug or app.testing:
        # Skip debug and test mode. Just check standard output.
        return None

    import logging
    from logging.handlers import SMTPHandler
    from logentries import LogentriesHandler

    # Set info level on logger, which might be overwritten by handlers.
    # Suppress DEBUG messages.
    app.logger.setLevel(logging.INFO)

    # Main log
    info_log = os.path.join(app.config['LOG_FOLDER'], 'flask_app.log')
    info_file_handler = logging.handlers.RotatingFileHandler(
        info_log, maxBytes=100000, backupCount=10)
    info_file_handler.setLevel(logging.INFO)
    info_file_handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s: %(message)s '
                          '[in %(pathname)s:%(lineno)d]'))
    app.logger.addHandler(info_file_handler)

    # Log Entries
    if app.config.get('BOX_TYPE') == 'WEB':
        le_handler = LogentriesHandler('changeme')  # TODO: load real token from config
        le_handler.setLevel(logging.INFO)
        le_log = logging.getLogger('logentries')
        le_log.setLevel(logging.INFO)
        le_log.addHandler(le_handler)
        # BUG FIX: Logger.addHandler() expects a logging.Handler; the
        # original passed the 'logentries' Logger object itself. Attach
        # the handler instead so app.logger records reach Logentries.
        app.logger.addHandler(le_handler)
def __init__(self, token):
    """Attach a Logentries handler (for *token*) to the root logger,
    adding Scrapy Cloud metadata when SCRAPY_JOB_ID is set."""
    self.token = token
    root = logging.getLogger()
    self.handler = LogentriesHandler(token)
    spider_id = os.environ.get('SCRAPY_SPIDER_ID')
    project_id = os.environ.get('SCRAPY_PROJECT_ID')
    job_id = os.environ.get('SCRAPY_JOB_ID')
    formatted = False
    if job_id is not None:
        formatted = True
        # `hub_filter`/`hub_format` replace the original names `filter`
        # and `format`, which shadowed the Python builtins.
        hub_filter = ScrapingHubFilter({
            'project_id': project_id,
            'spider_id': spider_id,
            'job_id': job_id,
        })
        hub_format = ("%(name)s - %(levelname)s - "
                      "[project_id=%(project_id)s spider_id=%(spider_id)s "
                      "job_id=%(job_id)s] %(message)s")
        formatter = logging.Formatter(hub_format)
        self.handler.addFilter(hub_filter)
        self.handler.setFormatter(formatter)
    root.addHandler(self.handler)
    # NCA: not sure we want sensitive information like the token in the logs
    # Maybe use debug log level instead
    if formatted:
        logger.info('Logentries activated with token {} and custom SH format'.format(token))
    else:
        logger.info('Logentries activated with token {} and no custom SH format'.format(token))
def init():
    '''Registers the msg-logger.

    Uses a Logentries handler when LOGENTRIES_TOKEN is set, otherwise a
    stream handler; both emit through CustomFormatter.
    '''
    token = os.environ.get('LOGENTRIES_TOKEN')
    if token is not None:
        handler = LogentriesHandler(token, format=CustomFormatter())
    else:
        handler = logging.StreamHandler()
        handler.setFormatter(CustomFormatter())

    logger = logging.getLogger('msg-logger')
    logger.setLevel(logging.INFO)
    logger.addFilter(CustomFilter())
    logger.addHandler(handler)
def setup_logging(loggers_and_levels, logentries_id=None):
    """Wire a stream handler and a Logentries handler (sharing one format)
    onto the 'logentries' logger and every (logger, level) pair given."""
    shared_formatter = logging.Formatter(
        fmt="%(asctime)s:%(levelname)s:%(name)s:%(message)s")

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(shared_formatter)

    le_handler = LogentriesHandler(logentries_id)
    le_handler.setFormatter(shared_formatter)

    le_log = logging.getLogger('logentries')
    le_log.setLevel(logging.INFO)
    le_log.addHandler(stream_handler)
    le_log.addHandler(le_handler)

    for logger, level in loggers_and_levels:
        logger.setLevel(level)
        logger.addHandler(stream_handler)
        logger.addHandler(le_handler)
def __init__(self):
    """Create the 'logentries' logger, attach a handler for the token from
    settings, and mark remote logging as enabled."""
    le_logger = logging.getLogger('logentries')
    le_logger.setLevel(logging.INFO)
    le_logger.addHandler(
        LogentriesHandler(settings.get_env_var("LOGENTRIES_TOKEN")))
    self.logger = le_logger
    self.enabled = True
def get_logger():
    """Return the 'logentries' logger at INFO, always echoing to the
    stream handler and additionally to Logentries when LOG_ENTRIES is set."""
    log = logging.getLogger('logentries')
    log.setLevel(logging.INFO)
    pid_format = str(os.getpid()) + ': %(levelname)s, %(message)s'
    if 'LOG_ENTRIES' in os.environ:
        log.addHandler(
            LogentriesHandler(os.environ['LOG_ENTRIES'],
                              format=logging.Formatter(pid_format)))
    log.addHandler(logging.StreamHandler())
    return log
def set_logger(logentries_token):
    '''
    Parameters:
        logentries_token: logentries access token
    Returns:
        log: logger
    '''
    logger = logging.getLogger('logentries')
    logger.addHandler(LogentriesHandler(logentries_token))
    logger.setLevel(logging.INFO)
    return logger
def initialize_logging():
    """Configure basic console logging from LOG_LEVEL and, when a
    LOGENTRIES_TOKEN is present, mirror records to Logentries."""
    level = os.environ.get("LOG_LEVEL", "INFO")
    logging.basicConfig(format='[%(levelname)s] %(message)s', level=level)

    token = os.environ.get('LOGENTRIES_TOKEN')
    if token:
        log.addHandler(LogentriesHandler(token))
    else:
        log.warning(
            "No LOGENTRIES_TOKEN found in environment. Only logging to local console."
        )
    log.info("Logging initialized, level=%s", level)
def get_custom_logger(name):
    """
    Set up loggers according to environment and configuration.

    Handlers attached, depending on app config:
    - a FileHandler (outside testing, when a path is configured)
    - a stdout StreamHandler (development only)
    - a LogentriesHandler (outside testing, when a token is configured)
    """
    # `log_path` replaces the original local `file`, which shadowed a
    # builtin and was reused for both the path and the handler object.
    log_path = app.config['LOGGER_FILEPATH']
    token = app.config['LOGENTRIES_TOKEN']
    level = DEBUG if app.config['DEBUG'] else INFO
    context = app.config['ENVIRONMENT']

    log = getLogger(name)
    log.setLevel(level)

    # shared by all handlers
    formatter = Formatter(
        '[%(asctime)s] '
        '[%(process)d] '
        '[%(name)s] '
        '[%(levelname)s] '
        '%(message)s'
    )

    if context != 'testing' and log_path:
        file_handler = FileHandler(log_path, mode='a+')
        file_handler.setLevel(level)
        file_handler.setFormatter(formatter)
        log.addHandler(file_handler)

    if context == 'development':
        console = StreamHandler(stream=stdout)
        console.setLevel(level)
        console.setFormatter(formatter)
        log.addHandler(console)

    if context != 'testing' and token:
        logentries = LogentriesHandler(token)
        logentries.setLevel(level)
        logentries.setFormatter(formatter)
        log.addHandler(logentries)

    return log
def __init__(self, token):
    """Register a Logentries handler for *token* on the root logger; on
    Scrapy Cloud (SCRAPY_JOB_ID set) add job metadata filter/format."""
    self.token = token
    root = logging.getLogger()
    self.handler = LogentriesHandler(token)
    spider_id = os.environ.get('SCRAPY_SPIDER_ID')
    project_id = os.environ.get('SCRAPY_PROJECT_ID')
    job_id = os.environ.get('SCRAPY_JOB_ID')
    formatted = False
    if job_id is not None:
        formatted = True
        # Renamed from `filter`/`format` to stop shadowing the builtins.
        record_filter = ScrapingHubFilter({
            'project_id': project_id,
            'spider_id': spider_id,
            'job_id': job_id,
        })
        record_format = ("%(name)s - %(levelname)s - "
                         "[project_id=%(project_id)s spider_id=%(spider_id)s "
                         "job_id=%(job_id)s] %(message)s")
        formatter = logging.Formatter(record_format)
        self.handler.addFilter(record_filter)
        self.handler.setFormatter(formatter)
    root.addHandler(self.handler)
    # NCA: not sure we want sensitive information like the token in the logs
    # Maybe use debug log level instead
    if formatted:
        logger.info(
            'Logentries activated with token {} and custom SH format'.
            format(token))
    else:
        logger.info(
            'Logentries activated with token {} and no custom SH format'.
            format(token))
class WsHandler(WebSocketHandler):
    """Tornado websocket endpoint keyed by user id.

    Class-level state: a shared GlobalStore and a 'logentries' logger.
    """
    store = GlobalStore()
    log = logging.getLogger('logentries')
    log.setLevel(logging.INFO)
    # TODO: move this token out of source control
    log.addHandler(LogentriesHandler('58ba03d6-2305-42bd-b0e7-af3ccfd1b698'))

    def check_origin(self, origin):
        # accept connections from any origin
        return True

    def open(self, user_id):
        # make it int until handshake validator not completed
        result = EndpointValidator().load({'user_id': user_id})
        if result.errors:
            print('closing:', result)
            # BUG FIX: logging uses printf-style interpolation; the original
            # `log.info('closing:', result)` passed `result` as a format
            # argument for a message with no placeholder, raising a
            # formatting error instead of logging it.
            WsHandler.log.info('closing: %s', result)
            self.write_message(json.dumps(result.errors))
            self.close()
        else:
            # record the verified connection and confirm the handshake
            WsHandler.store.connected[result.data['user_id']] = self
            self.user_id = result.data['user_id']
            self.write_message(
                json.dumps({
                    "type": "handshake",
                    "user_id": result.data['user_id']
                }))

    def on_message(self, message):
        try:
            result = PayloadValidator.validate(message, self)
        except Exception as e:
            # roll back any partial DB work before reporting
            session.rollback()
            WsHandler.log.exception(e)
            return
        if result:
            pass
        else:
            """ successful handshake """
            pass

    def on_close(self):
        # this needs more work for removing both connected and verified clients
        WsHandler.store.remove_verified(self.user_id, self)
#!/usr/bin/env python from logentries import LogentriesHandler import logging log = logging.getLogger('logentries') test = LogentriesHandler("edf701b4-c7a9-4f9d-802d-8c9dc26fca51") log.addHandler(test) log.warn("Warning message") raw_input("test");
#!/usr/bin/env python
"""Flask listener that receives bounce postbacks and logs them to Logentries."""
from logentries import LogentriesHandler
import logging
from flask import Flask, jsonify, request
import os

listener = Flask(__name__)

# Configure the port your postback URL will listen on
PORT = 5000

# Note - LOGENTRIES_TOKEN is provided via env variable
log = logging.getLogger('logentries')
log.setLevel(logging.INFO)
dyn = LogentriesHandler(os.environ["LOGENTRIES_TOKEN"])
log.addHandler(dyn)


# Enter the following for the bounce postback URL:
# SCRIPT_HOST_IP:PORT/bounce?e=@email&r=@bouncerule&t=@bouncetype&dc=@diagnostic&s=@status
@listener.route('/bounce', methods=['GET'])
def bounce():
    e = request.args.get('e')
    r = request.args.get('r')
    t = request.args.get('t')
    dc = request.args.get('dc')
    s = request.args.get('s')
    # BUG FIX: the original used a backslash line continuation *inside* the
    # string literal, which embedded the next line's leading whitespace in
    # the logged message; implicit concatenation keeps a single space.
    log.info("BOUNCE email='{}' rule='{}' type='{}' diagnostic='{}' "
             "status='{}'".format(e, r, t, dc, s))
    return jsonify(result={"status": 200})
import os
import sys
import logging
from logentries import LogentriesHandler
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# from sqlalchemy import create_engine

# Application constants; secrets are required environment variables
# (a missing LOGENTRIES_KEY / DATABASE_URL fails fast with KeyError).
APP_HOME = '/ornix'
APP_NAME = 'ornix'
LOGENTRIES_KEY = os.environ['LOGENTRIES_KEY']
DATABASE_URL = os.environ['DATABASE_URL']
# DATABASE_URL = 'mysql+pymysql://ornix@localhost/ornix?charset=utf8'
LOG_FORMAT = '%(asctime)s [%(levelname)s] %(message)s'

# Log to stdout (e.g. for a PaaS log drain) and mirror to Logentries.
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=LOG_FORMAT)
log = logging.getLogger(__name__)
# lh = logging.FileHandler('{}/var/logs/{}.log'.format(APP_HOME, APP_NAME))
# log.addHandler(lh)
log.addHandler(LogentriesHandler(LOGENTRIES_KEY))

# Flask application and SQLAlchemy binding.
app = Flask(APP_NAME)
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URL
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
def configure(self):
    """Return the 'logentries' logger (DEBUG level) wired to this token."""
    le_logger = logging.getLogger('logentries')
    le_logger.setLevel(logging.DEBUG)
    le_logger.addHandler(LogentriesHandler(self.token))
    return le_logger
return template("home", content) if __name__ == '__main__': app = default_app() appRecords = [ "A", "AAAA", "CNAME", "DS", "DNSKEY", "MX", "NS", "NSEC", "NSEC3", "RRSIG", "SOA", "TXT" ] appResolver = os.getenv('APP_RESOLVER', '8.8.8.8') serverHost = os.getenv('IP', 'localhost') serverPort = os.getenv('PORT', '5000') # Now we're ready, so start the server # Instantiate the logger log = logging.getLogger('log') console = logging.StreamHandler() log.setLevel(logging.INFO) log.addHandler(console) if not os.getenv('LOGENTRIES_TOKEN', '') is '': log.addHandler(LogentriesHandler(os.getenv('LOGENTRIES_TOKEN'))) # Now we're ready, so start the server try: app.run(host=serverHost, port=serverPort, server='tornado') except: log.error("Failed to start application server")
Parameters:
        tweets: a list of tweets to put in redis queue
    Returns:
        None
    '''
    # Push each tweet onto the 'tweets' list in redis.
    for tweet in tweets:
        redis.Redis.rpush(conn, 'tweets', tweet)
    return


if __name__ == '__main__':
    import os
    import sys
    import logging
    from logentries import LogentriesHandler
    import psycopg2

    # All progress messages go to Logentries.
    log = logging.getLogger('logentries')
    log.setLevel(logging.INFO)
    log.addHandler(LogentriesHandler(os.getenv('PROJECTWORK_LOGENTRIES_TOKEN')))

    log.info('Started')
    log.info('getting bearer token')
    token = get_token(
        os.getenv('TWITTER_API_PKEY'),
        os.getenv('TWITTER_API_SECRET')
    )
    log.info('Getting redis connection')
    conn = get_redis_connection()
    log.info('Getting tweets')
    tweets = get_tweets(token, conn)
    log.info('Finished')
    sys.exit(0)
import json
import requests
import logging
import time
from logentries import LogentriesHandler
import logging
from place import *

# Ship INFO logs to Logentries.
# NOTE(review): token is hard-coded — should come from configuration.
log = logging.getLogger('logentries')
log.setLevel(logging.INFO)
log.addHandler(LogentriesHandler('56926f61-286f-4224-9ff3-e23ebfa858f6'))

# NOTE(review): Google Places API key committed in source — rotate it and
# load from the environment instead.
API_PLACES = "AIzaSyDHoS63IY3s5KczZujHBhhl70mrQLVo-QE"


def getDetails(id, api):
    # Fetch place details from the Google Places API.
    # (Python 2 module: note the print statement below.)
    request = requests.get("https://maps.googleapis.com/maps/api/place/details/json?placeid=%s&key=%s" % (id.rstrip(), api))
    reply = json.loads(request.text.encode('ascii', 'ignore'))
    print reply
    return reply


def createPlace(details):
    # Build a Place object from a details payload.
    # NOTE(review): assumes the 'result' keys exist — a missing key raises
    # KeyError rather than being skipped; confirm against the API contract.
    new_place = Place()
    if details['result']['place_id']:
        new_place.set_place_id(details['result']['place_id'])
    if details['result']['types']:
        new_place.set_types(details['result']['types'])
    if details['result']['geometry']['location']:
        new_place.set_location(str(details['result']['geometry']['location']['lat']) + "," + str(details['result']['geometry']['location']['lng']))
from config import *
from event_analyzer_lib import core, utils

# Configure Logentries: INFO-and-up in production, everything elsewhere.
logger = logging.getLogger('logentries')
if APP_ENV == 'prod':
    logger.setLevel(logging.INFO)
else:
    logger.setLevel(logging.DEBUG)
# Console echo for local debugging.
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(
    logging.Formatter(
        '%(asctime)s - %(name)s : %(levelname)s, %(message)s'))
logger.addHandler(ch)
logentries_handler = LogentriesHandler(LOGENTRIES_TOKEN)
logger.addHandler(logentries_handler)

# Configure Bugsnag
bugsnag.configure(api_key=BUGSNAG_TOKEN,
                  project_root=os.path.dirname(os.path.realpath(__file__)))

app = Flask(__name__)

# Attach Bugsnag to Flask's exception handler
handle_exceptions(app)


@app.before_first_request
def init_before_first_request():
    # Runs once, right before the first request is served.
    init_tag = "[Initiation of Service Process]\n"
#!/usr/bin/env python from logentries import LogentriesHandler import logging log = logging.getLogger('logentries') log.setLevel(logging.INFO) # Set my Logentries Token test = LogentriesHandler("LOGENTRIES-TOKEN") log.addHandler(test) # Log away! log.warn("Warning message") log.info("Info message")
def format(self, record):
    """Format the record via LogentriesHandler, then strip it with snostrip."""
    return snostrip(LogentriesHandler.format(self, record))
def get_le_handler():
    """Return a plaintext (no TLS, non-verbose) Logentries handler for the
    built-in forwarding token."""
    # setup logentries. we forward log messages to it
    return LogentriesHandler("e8549616-0798-4d7e-a2ca-2513ae81fa17",
                             use_tls=False, verbose=False)
import sys
from logentries import LogentriesHandler
import logging

log = logging.getLogger('logentries')
log.setLevel(logging.INFO)
log.addHandler(LogentriesHandler('06380d85-3900-4507-913b-0b9d4f06f3b7'))


def logMsg(message):
    # simple wrapper for MyLogger.logging to stdout on heroku
    # (Python 2 module: print statement below)
    print str(message)
    # with open("logs.txt", "a") as myfile:
    #     myfile.write(str(datetime.now())+" : "+str(message))
    # flush explicitly so heroku's buffered stdout shows the line at once
    sys.stdout.flush()
    log.info(message)
import logging
import logging.config
import loggly.handlers

logging.config.fileConfig('logging.conf')
loggerr = logging.getLogger(__name__)

# """
# Logentries setting
from logentries import LogentriesHandler
import logging
import time

log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
iamsophy = LogentriesHandler('9e7d01fc-3617-4d2f-aebe-ecadc96eaa16')
log.addHandler(iamsophy)
#


def update_stock(cleanup=True):
    """Optionally truncate the `stock` table before the refresh task runs.

    Args:
        cleanup (bool): when True, TRUNCATE the stock table first.
    """
    if cleanup:
        # BUG FIX: acquire the connection *before* entering the try block.
        # In the original, a failure inside pymysql_conn() left `conn`
        # unbound, so the finally clause raised NameError and masked the
        # real connection error.
        conn = pymysql_conn()
        try:
            with conn.cursor() as cursor:
                sql = 'TRUNCATE stock;'
                cursor.execute(sql)
                conn.commit()
        finally:
            conn.close()


from .tasks import insert_stock
from django.template import Library
from django.template.defaultfilters import stringfilter
from django.contrib.humanize.templatetags.humanize import intcomma
from logentries import LogentriesHandler
import logging
import time

# temp
# NOTE(review): Logentries token hard-coded in a template-tags module.
log = logging.getLogger('logentries')
log.setLevel(logging.INFO)
handler = LogentriesHandler('28379e13-d9b8-434f-a233-7ec9369d2fcb')
log.addHandler(handler)

register = Library()


@register.filter
def get_range(value):
    """
    Returns an iterable list given a integer
    """
    # Handle the case where value is None. Don't just say "if value" in case it is 0.
    if value is not None:
        return range(value)
    return value


# Thank you Stackoverflow for this simple and easy solution
#http://stackoverflow.com/questions/6481788/format-of-timesince-filter
# (the decorated filter body continues past this chunk)
@register.filter(is_safe=True)
@stringfilter
# Hostname used to mimic syslog's "<timestamp> <host> <tag>: <msg>" layout.
syslog_hostname = os.getenv("HOSTNAME", "test-host")
syslog_formatter = logging.Formatter('%(asctime)s ' + syslog_hostname + ' %(name)s: %(message)s',
                                     datefmt='%b %d %H:%M:%S')

# Optional syslog forwarding, configured as "host:port" (UDP).
syslog_address = os.getenv('SYSLOG_ADDRESS', '')
if syslog_address:
    syslog_host, syslog_udp_port = syslog_address.split(":")
    syslog_handler = SysLogHandler(address=(syslog_host, int(syslog_udp_port)))
    syslog_handler.setFormatter(syslog_formatter)
    syslog_handler.setLevel(syslog_logging_level)
    logging_handlers.append(syslog_handler)

# Optional Logentries forwarding; reuses the syslog-style formatter.
logentries_token = os.getenv('LOGENTRIES_TOKEN', '')
if logentries_token:
    logentries_handler = LogentriesHandler(logentries_token)
    logentries_handler.setFormatter(syslog_formatter)
    logentries_handler.setLevel(syslog_logging_level)
    logging_handlers.append(logentries_handler)

logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.DEBUG,
    handlers=logging_handlers)


def main():
    # Tokens/ids come from the environment; SC_AUTH_TOKEN is required
    # (KeyError if absent). Function continues past this chunk.
    botan_token = os.getenv('BOTAN_TOKEN', '')
    sc_auth_token = os.environ['SC_AUTH_TOKEN']
    store_chat_id = int(os.getenv('STORE_CHAT_ID', '0'))
import os
import sys
import json
import requests
from loguru import logger
from dotenv import load_dotenv
from logentries import LogentriesHandler

load_dotenv()

# GitHub OAuth client credentials and the Logentries token, all from .env.
cid = os.environ.get('CLIENT_ID')
csecret = os.environ.get('CLIENT_SECRET')
LOGENTRIES_TOKEN = os.environ.get('LOGENTRIES_TOKEN')

LH = LogentriesHandler(LOGENTRIES_TOKEN)
# loguru sinks may be standard logging.Handler instances, as here.
logger.add(LH, level='DEBUG', format='{name}:{function}:{line} - {message}')


class Member():
    """Accumulates a GitHub user's profile data and repository statistics.

    Definition continues past this chunk.
    """

    def __init__(self, u):
        # u: GitHub username; profile fields are filled in later.
        self.username = u
        self.name = ""
        self.avatar = ""
        self.bio = ""
        self.followers = 0
        self.following = 0
        self.REPOS_URL = f"https://api.github.com/users/{u}/repos"
        self.repos = []
        self.nRepos = 0
        self.totalCommits = 0