def favorites_sync_job():
    """Sync app favorites into our review service as 5-star reviews.

    Fetches (wine_id, user_id) pairs from the app, converts each pair into a
    review payload and POSTs the whole batch to ``{OUR_ADDRESS}/review/``.
    Failures are logged via the logstash-backed logger and never raised to
    the caller.
    """
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('kafka_reader')
    logger.addHandler(logging.StreamHandler())
    # logstash_host is expected as "host:port" — module-level setting.
    logger.addHandler(
        logstash.TCPLogstashHandler(host=logstash_host.split(":")[0],
                                    port=int(logstash_host.split(":")[1]),
                                    version=1,
                                    tags=["ml-team-2-service"]))
    try:
        logger.warning("Started adding new favorites")
        app_favorites = get_app_favorites(logger)
        logger.warning(f"Got favorites {app_favorites}")
        # One review entry per favorite; tqdm kept for progress display.
        request_body = [{
            "rating": 5,
            "variants": 5,
            "wine": wine_id,
            "user": user_id
        } for wine_id, user_id in tqdm(app_favorites)]
        # BUG FIX: the original passed json=json.dumps(request_body), which
        # double-encodes the payload — the server received a JSON *string*
        # instead of a JSON array. requests' ``json=`` already serializes.
        response = requests.post(f"{OUR_ADDRESS}/review/", json=request_body)
        if response.status_code != 200:
            logger.error("Adding favorites failed")
            logger.error(response.status_code)
            logger.error(response.text)
            raise Exception("Adding favorites failed")
        logger.warning("Finished sync favorites")
    except Exception:
        logger.exception("Exception while run favorites_sync_job")
def get_logstash(name, host='localhost', type='logstash', port=5959):
    """Return a logger wired to a TCP logstash endpoint at *host*:*port*.

    NOTE: the ``type`` parameter (logstash message_type) shadows the builtin,
    but the name is part of the public signature and is kept for callers.
    """
    log = logging.getLogger(name)
    log.setLevel(logging.INFO)
    handler = logstash.TCPLogstashHandler(host, port,
                                          message_type=type, version=1)
    log.addHandler(handler)
    return log
def main():
    """Smoke-test logstash logging.

    Resolves the endpoint from the LOGSTASH_PORT docker-link env variable
    (falling back to tcp://localhost:514 for local runs) and emits one
    message per severity level so the Logstash web UI can be checked.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.StreamHandler())  # use for testing the script locally

    # Docker linking sets up the ENV variable.
    CONN = os.environ.get("LOGSTASH_PORT")
    if CONN is None:
        logger.critical("====== Couldn't find logstash container -- logging locally =========")
        logger.critical("====== If you aren't testing locally, then this is an error ========")
        # Test locally without using the Container
        CONN = ("tcp://localhost:514")
    url = parse.urlsplit(CONN)

    test_logger = logging.getLogger('logstash!')
    test_logger.setLevel(logging.DEBUG)
    test_logger.addHandler(logstash.TCPLogstashHandler(url.hostname, url.port, version=1))

    # send some messages
    # BUG FIX: message previously read "critial message" (typo in the
    # emitted log text).
    test_logger.critical("critical message")
    test_logger.warning("warning message1")
    test_logger.info("info message")
    test_logger.debug("debug message")
    logger.info("Check the Logstash web interface to make sure the log messages worked.")
def logstash(self):
    """Establish a logstash-backed logger from the instance configuration.

    Args:
        self.SimplicifyConfig: configuration dictionary loaded from
            config.json; the 'logstash' section must provide 'host' and
            'port'.

    Returns:
        The configured ``logging.Logger``, after emitting a startup record.
    """
    host = self.SimplicifyConfig['logstash']['host']
    port = self.SimplicifyConfig['logstash']['port']
    self.logger = logging.getLogger('python-logstash-logger')
    self.logger.setLevel(logging.INFO)
    # BUG FIX: the configured port was read into ``port`` but a hard-coded
    # 5000 was passed to the handler; honor the configuration.
    self.logger.addHandler(
        logstash.TCPLogstashHandler(host, port, version=1))
    fields = {
        'prog_status': '0',
        'http_status': '200',
        'explanation':
        'Simplicify component ({}) is starting up, and is successfully logging'
        .format(self.program_name)
    }
    # BUG FIX: logging.Logger.info() has no ``fields`` keyword (the original
    # call raised TypeError); structured data goes through ``extra=``.
    self.logger.info('simplicify: fields', extra=fields)
    return self.logger
def setup_logger(self, logger: logging.Logger, level: int,
                 logger_name: str) -> logging.Logger:
    """Attach rotating-file, optional console and optional logstash handlers
    to *logger* and return it.

    Args:
        logger: the logger to configure (annotation fixed: ``logging.log``
            is a function, the handler target is a ``logging.Logger``).
        level: logging level applied to the file and console handlers.
        logger_name: base name for the log file under AppConfig.LOG_FOLDER.

    Returns:
        The same *logger*, with handlers attached and propagation disabled.
    """
    # Create formatter (CustomFormatter wraps the app-wide format string)
    original_formatter = logging.Formatter(AppConfig.APP_FORMATTER)
    f = CustomFormatter(original_formatter)
    # Create rotating file handler
    rfh = logging.handlers.RotatingFileHandler(
        '{}/{}.log'.format(AppConfig.LOG_FOLDER, logger_name), 'a',
        AppConfig.LOG_MAX_SIZE, AppConfig.LOG_MAX_FILES)
    rfh.setLevel(level)
    rfh.setFormatter(f)
    # Create console handler only when enabled in the main configuration
    use_console_log = self.is_value_active(AppConfig.MAIN_CONFIGURATION,
                                           AppConfig.APP_CONSOLE_LOG)
    ch = None
    if use_console_log:
        ch = logging.StreamHandler()
        ch.setLevel(level)
        ch.setFormatter(f)
    # Create logstash handler when both host and port are configured
    if AppConfig.APP_HOST_LOGSTASH is not None and AppConfig.APP_PORT_LOGSTASH is not None:
        lh = logstash.TCPLogstashHandler(AppConfig.APP_HOST_LOGSTASH,
                                         AppConfig.APP_PORT_LOGSTASH,
                                         version=1)
        logger.addHandler(lh)
    # Add the handlers to the logger
    logger.addHandler(rfh)
    if ch:
        logger.addHandler(ch)
    # Stop records propagating to ancestor loggers (avoids duplicate output)
    logger.propagate = 0
    return logger
def __init__(self, args=None):
    """Shared-state initializer: wires console + logstash logging for the
    hab selected by ``args.purpose``.

    Args:
        args: parsed CLI arguments; ``args.purpose`` must be one of
            'dragon', 'bug' or 'test'. When None, only the shared state is
            attached and the rest of the setup is skipped.
    """
    # Borg pattern: every instance shares one state dict.
    self.__dict__ = self.__shared_state
    if args is None:
        return
    self.args = args
    # Map the CLI purpose to a human-readable logger name.
    name = {'dragon': 'DragonHab', 'bug': 'BugHab', 'test': 'DevHab'}
    self.name = name[self.args.purpose]
    self._log = logging.getLogger(self.name)
    self._log.setLevel(logging.DEBUG)
    # Main log stream goes to logstash over TCP with a verbose formatter.
    sh = logstash.TCPLogstashHandler('192.168.1.2', 5003)
    sh.setFormatter(VerboseLogstashFormatter())
    self._log.addHandler(sh)
    formatter = logging.Formatter(
        '{asctime} {levelname} (unknown):{lineno} {message}', style='{')
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    self._log.addHandler(ch)
    # Separate logger for metrics; LogstashHandler (no "TCP") is the
    # datagram variant — NOTE(review): confirm port 5002 expects UDP.
    self._metric_log = logging.getLogger(self.name + "_metric")
    # ch2 = logging.StreamHandler(stream=None)
    # self._metric_log.addHandler(ch2)
    self._metric_log.addHandler(
        logstash.LogstashHandler('192.168.1.2', 5002, version=1))
    self._metric_log.setLevel(logging.DEBUG)
    self._metric_log.propagate = True
def __init__(self, config):
    """Build the instance's logstash-backed logger.

    Args:
        config: mapping with 'host', 'port' and 'version' keys describing
            the logstash TCP endpoint.
    """
    handler = logstash.TCPLogstashHandler(config['host'],
                                          config['port'],
                                          version=config['version'])
    log = logging.getLogger('python-logstash-logger')
    log.setLevel(logging.INFO)
    log.addHandler(handler)
    self.logger = log
def main():
    """Demo loop: log a random value to stdout and logstash forever,
    sleeping a little between records."""
    # BUG FIX: the port was passed as the string "5959"; the handler's
    # socket layer expects an integer port number.
    logstash_handler = logstash.TCPLogstashHandler(host="127.0.0.1",
                                                   port=5959,
                                                   version=1)
    logging.basicConfig(
        level=logging.DEBUG,
        handlers=[
            logging.StreamHandler(),
            logstash_handler,
        ],
    )
    logging.info("Hello hello")
    while True:
        # 0.1 floor keeps the sleep from ever being (near) zero.
        r = random.random() + 0.1
        time.sleep(r * 2)
        logging.info("Random %.3f also in extra fields...", r,
                     extra={
                         "random": r,
                         "sub": {
                             "random": r
                         }
                     })
def setup_logger(self, logger_name, level):
    """Create and return the *logger_name* logger with a rotating file
    handler, a console handler and — when configured — a logstash handler,
    all using the infra-wide CustomFormatter."""
    log = logging.getLogger(logger_name)
    log.setLevel(level)

    formatter = CustomFormatter(logging.Formatter(InfraConfig.FORMATTER))

    file_handler = logging.handlers.RotatingFileHandler(
        '../{}.log'.format(logger_name), 'a',
        InfraConfig.MAX_SIZE, InfraConfig.MAX_FILES)
    file_handler.setLevel(level)
    file_handler.setFormatter(formatter)

    console_handler = logging.StreamHandler()
    console_handler.setLevel(level)
    console_handler.setFormatter(formatter)

    # Logstash is optional: attach only when host and port are both set.
    if InfraConfig.HOST_LOGSTASH is not None and InfraConfig.PORT_LOGSTASH is not None:
        log.addHandler(logstash.TCPLogstashHandler(InfraConfig.HOST_LOGSTASH,
                                                   InfraConfig.PORT_LOGSTASH,
                                                   version=1))

    log.addHandler(file_handler)
    log.addHandler(console_handler)
    # Keep records out of ancestor loggers.
    log.propagate = 0
    return log
def configure_logging():
    """Configure debug logging based on app config."""
    global debug
    try:
        debug = app.config['DEBUG']
        if debug:
            logging.basicConfig(format='%(message)s', level=logging.DEBUG)
        else:
            logging.basicConfig(format='%(message)s', level=logging.INFO)
    except KeyError as err:
        # No DEBUG key: default to non-debug; note basicConfig is not called
        # on this path.
        debug = False
    try:
        # LOGSTASH config value looks like "host:port" — split below assumes
        # exactly one colon; TODO confirm against the app's config docs.
        logstash_config = app.config['LOGSTASH']
        logger = logging.getLogger()
        # Imported lazily so the app works without python-logstash installed.
        import logstash
        host, ls_port = logstash_config.split(':')
        logger.addHandler(logstash.TCPLogstashHandler(host=host,
                                                      port=int(ls_port),
                                                      version=1))
    except ImportError as err:
        # python-logstash missing: disable (local name) and report the error.
        logstash = None
        logging.error('python-logstash module not available %s', err)
    except KeyError as err:
        # No LOGSTASH key in config: logstash output simply not configured.
        pass
def user_sync_job():
    """Sync app users into our service.

    Computes the set of user ids present in the app but missing on our side
    and POSTs them as a batch to ``{OUR_ADDRESS}/users/``. Failures are
    logged via the logstash-backed logger and never raised to the caller.
    """
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('kafka_reader')
    logger.addHandler(logging.StreamHandler())
    # logstash_host is expected as "host:port" — module-level setting.
    logger.addHandler(
        logstash.TCPLogstashHandler(host=logstash_host.split(":")[0],
                                    port=int(logstash_host.split(":")[1]),
                                    version=1,
                                    tags=["ml-team-2-service"]))
    try:
        app_users_id = get_app_users_id(logger)
        our_users_id = get_our_users_id(logger)
        new_ids = set(app_users_id) - set(our_users_id)
        logger.warning(
            f"Started adding new users. Will be added {len(new_ids)} users")
        request_body = [{"internal_id": id_} for id_ in tqdm(new_ids)]
        # BUG FIX: the original passed json=json.dumps(request_body), which
        # double-encodes the payload — the server received a JSON *string*
        # instead of a JSON array. requests' ``json=`` already serializes.
        response = requests.post(f"{OUR_ADDRESS}/users/", json=request_body)
        if response.status_code != 200:
            logger.error("Adding users failed")
            logger.error(response.status_code)
            logger.error(response.text)
            raise Exception("Adding users failed")
        logger.warning("Finished adding new users")
    except Exception:
        logger.exception("Exception while run user_sync_job")
def _init_plugin(self):
    """Initialize the logstash callback plugin: logger, per-run session id
    and the base metadata attached to every emitted record.

    No-op when the plugin is disabled.
    """
    if not self.disabled:
        self.logger = logging.getLogger('python-logstash-logger')
        self.logger.setLevel(logging.DEBUG)
        self.handler = logstash.TCPLogstashHandler(
            self.ls_server, self.ls_port, version=1, message_type=self.ls_type)
        self.logger.addHandler(self.handler)
        self.hostname = socket.gethostname()
        # Unique id correlating all records from this playbook run.
        self.session = str(uuid.uuid4())
        self.errors = 0
        self.base_data = {'session': self.session, 'host': self.hostname}
        if self.ls_pre_command is not None:
            # NOTE(review): shell-executes the configured pre-command and
            # captures its stdout; the command string comes from plugin
            # configuration, so it is trusted by assumption — confirm.
            self.base_data['ansible_pre_command_output'] = os.popen(
                self.ls_pre_command).read()
        if self._options is not None:
            # Record the ansible CLI options alongside every log record.
            self.base_data['ansible_checkmode'] = self._options.check
            self.base_data['ansible_tags'] = self._options.tags
            self.base_data['ansible_skip_tags'] = self._options.skip_tags
            self.base_data['inventory'] = self._options.inventory
def test_base_tcp_decoding(self):
    """Assert json decoding of TCP message.

    For each supported logstash message version: send one ERROR record
    through a fresh TCPLogstashHandler and check that the bytes received by
    the test server parse as JSON containing the original message under the
    version-specific message key.
    """
    test_str = u'test-runner: simple message'
    for version in list(self.logstash_versions.keys()):
        test_logger = logging.getLogger('python-logstash-logger')
        test_logger.setLevel(logging.INFO)
        # BUG FIX: the original iterated test_logger.handlers while calling
        # removeHandler on it, mutating the list being iterated and thereby
        # skipping every other handler. Iterate a copy instead so all
        # leftover handlers are removed before attaching the one under test.
        for handler in list(test_logger.handlers):
            test_logger.removeHandler(handler)
        test_logger.addHandler(
            logstash.TCPLogstashHandler(self.host, self.port, version=version))
        test_logger.error(test_str)
        # The test TCP server pushes each received payload onto a queue.
        recv_str = self.server.RequestHandlerClass.recv_queue.get()
        try:
            recv_dict = json.loads(recv_str)
        except ValueError as e:
            self.fail("String '%s' could not be parsed as json." % recv_str)
        err_msg = ("Parsed json: '%s' did not have matching message '%s'." %
                   (recv_str, test_str))
        # Per-version mapping of logical field names to JSON keys.
        version_keys = self.logstash_versions[version]
        try:
            recv_dict[version_keys['msg_str']]
        except KeyError:
            self.fail("Could not find '%s' in '%s'." %
                      (version_keys['msg_str'], recv_str))
        self.assertEqual(recv_dict[version_keys['msg_str']], test_str,
                         msg=err_msg)
def __init__(self, db_api, log_level=logging.INFO): self.db_api = db_api # Set up logging log = logging.getLogger('gamebot_scripts') log.addHandler(logstash.TCPLogstashHandler(LOGSTASH_IP, LOGSTASH_PORT, version=1)) log.setLevel(log_level) log_formatter = coloredlogs.ColoredFormatter(ScriptsFacade.LOG_FMT) log_handler = logging.StreamHandler() log_handler.setFormatter(log_formatter) log.addHandler(log_handler) self.log = log # Test connection to RabbitMQ server host = settings.RABBIT_ENDPOINT username = settings.RABBIT_USERNAME password = settings.RABBIT_PASSWORD credentials = pika.PlainCredentials(username, password) self.conn_params = pika.ConnectionParameters(host=host, credentials=credentials) while True: try: connection = pika.BlockingConnection(self.conn_params) log.info("Connection to RabbitMQ dispatcher verified") break except pika.exceptions.AMQPConnectionError as ex: log.info("The RabbitMQ server is not ready yet...") time.sleep(5) continue connection.close()
def create_logger(logger_name):
    """Return a logstash-backed logger for *logger_name*.

    Idempotent: a logger that already has any handlers attached is returned
    untouched, so repeated calls never stack duplicate handlers.
    """
    log = logging.getLogger(logger_name)
    if log.handlers:
        return log
    log.setLevel(logging.INFO)
    log.addHandler(logstash.TCPLogstashHandler('localhost', 5000, version=1))
    return log
def initialize_logstash(logger=None, loglevel=logging.INFO, **kwargs):
    """Attach a celery-tagged TCP logstash handler to *logger*.

    BUG FIX: the original dereferenced ``logger`` unconditionally, so the
    declared default of ``None`` crashed with AttributeError. Fall back to
    the root logger, which keeps the signature backward-compatible.

    Args:
        logger: logger to configure; root logger when None.
        loglevel: level set on the logstash handler.
        **kwargs: ignored (signal-handler compatibility).

    Returns:
        The configured logger.
    """
    if logger is None:
        logger = logging.getLogger()
    handler = logstash.TCPLogstashHandler('localhost', 5959, tags=['celery'],
                                          message_type='celery', version=1)
    handler.setLevel(loglevel)
    logger.addHandler(handler)
    return logger
def get_logstash_logger():
    """Lazily build (at most once) and return the shared logstash logger.

    The singleton is cached as an attribute on the function object itself;
    the first call wires the handler, later calls return the cached logger.
    """
    if get_logstash_logger.logger is None:
        log = logging.getLogger('python-logstash-logger')
        log.setLevel(logging.INFO)
        log.addHandler(
            logstash.TCPLogstashHandler('localhost', 5000, version=1))
        get_logstash_logger.logger = log
    return get_logstash_logger.logger
def initialize_logstash(logger=None, loglevel=logging.INFO, **kwargs):
    """Attach a celery-tagged TCP logstash handler (LOGSTASH_HOST:PORT).

    BUG FIX: the original dereferenced ``logger`` unconditionally, so the
    declared default of ``None`` crashed with AttributeError. Fall back to
    the root logger, which keeps the signature backward-compatible.

    Args:
        logger: logger to configure; root logger when None.
        loglevel: level set on the logstash handler.
        **kwargs: ignored (signal-handler compatibility).

    Returns:
        The configured logger.
    """
    if logger is None:
        logger = logging.getLogger()
    handler = logstash.TCPLogstashHandler(LOGSTASH_HOST, LOGSTASH_PORT,
                                          tags=['celery'],
                                          message_type='celery', version=1)
    handler.setLevel(loglevel)
    logger.addHandler(handler)
    return logger
def _init_log_handler(self):
    """Configure ``self.log`` from ``self._config``.

    A console (stdout) handler is always attached; depending on the
    'log_handler' config value a file or logstash handler is attached first.

    Raises:
        KeyError: re-raised with a hint naming the missing config key.
    """
    try:
        handlers = []
        if self._config['log_handler'] == 'file':
            # create the logging file handler
            try:
                # Ensure the log file's grandparent directory exists.
                # NOTE(review): parents[1] skips the immediate parent —
                # confirm path_to_log always nests one level below the
                # directory being created.
                Path(self._config['path_to_log']).resolve(
                ).parents[1].mkdir(parents=True, exist_ok=True)
            except FileExistsError:
                pass
            self.log = logging.getLogger(f"{__name__}-python-file-logger")
            log_handler_ = logging.FileHandler(
                filename=self._config['path_to_log'], encoding=None, delay=False)
            format_str = u'%(asctime)s#%(filename)s[LINE:%(lineno)d] %(levelname)-8s %(message)s'
            format_obj = logging.Formatter(format_str)
            log_handler_.setFormatter(format_obj)
            handlers.append(log_handler_)
        elif self._config['log_handler'] in ('stash', 'logstash'):
            # create the logging logstash handler
            self.log = logging.getLogger(
                f'{__name__}-python-logstash-logger')
            log_handler = logstash.TCPLogstashHandler(
                host=self._config['logging_logstash_host'],
                port=self._config['logging_logstash_port'],
                version=1)
            handlers.append(log_handler)
        # always write to console; when no file/logstash handler was chosen
        # a dedicated stdout logger is used instead of the one set above
        self.log = self.log if handlers else logging.getLogger(
            f'{__name__}-python-stdout-logger')
        log_handler = logging.StreamHandler(sys.stdout)
        format_str = u'%(asctime)s#%(filename)s[LINE:%(lineno)d] %(levelname)-8s %(message)s'
        format_obj = logging.Formatter(format_str)
        log_handler.setFormatter(format_obj)
        handlers.append(log_handler)
        # example self.log.setLevel(logging.WARNING)
        # logging.setLevel accepts level names, hence the .upper() on the
        # configured string.
        self.log.setLevel(self._config['logging_level'].upper())
        list(map(self.log.addHandler, handlers))  # Added all handlers to logger
    except KeyError as e:
        raise KeyError(f"Specify necessary environment variable {e}")
def logging_setup(arguments):
    """Configure the root logger from parsed CLI arguments.

    Console output is always enabled; level is DEBUG when --debug is set,
    INFO otherwise. When --logstash is given as 'host:port', a TCP logstash
    handler is attached as well.
    """
    root = logging.getLogger()
    root.addHandler(logging.StreamHandler())
    root.setLevel(logging.DEBUG if arguments.debug else logging.INFO)
    if arguments.logstash:
        # Imported lazily so the dependency is only needed when requested.
        import logstash
        host, port = arguments.logstash.split(':')
        root.addHandler(
            logstash.TCPLogstashHandler(host=host, port=int(port), version=1))
def __init__(self, logstashAddress, logstashPort, logstashVersion=1):
    """Create an interception sender pointed at a logstash TCP endpoint.

    The underlying "interception logger" is shared process-wide, so its
    handler is only attached on the first construction.
    """
    self.logger = MyLogger().getLogger(__name__)
    self.address = logstashAddress
    self.port = logstashPort
    self.version = logstashVersion
    self.sender = logging.getLogger("interception logger")
    # Configure the shared sender exactly once across instances.
    if not self.sender.handlers:
        self.sender.setLevel(logging.INFO)
        self.sender.addHandler(
            logstash.TCPLogstashHandler(host=self.address,
                                        port=self.port,
                                        version=self.version))
def setup(self):
    """Attach a logstash TCP handler for this microservice's logger.

    Endpoint and version come from the container config when present,
    falling back to the instance defaults; records are tagged with
    'trinity', the service name and any extra instance tags.
    """
    # Base tags: platform name plus the owning service's name.
    _tags = ['trinity', self.container.service_name]
    self.logger = logging.getLogger(self.container.service_name)
    self.logger.setLevel(self._loglevel)
    if self.tags:
        _tags += self.tags
    # Container config keys override instance defaults (host/port/version).
    _handler = logstash.TCPLogstashHandler(
        self.container.config.get(LOGSTASH_HOST, self.host),
        self.container.config.get(LOGSTASH_PORT, self.port),
        version=self.container.config.get(LOGSTASH_VERSION, self.version),
        # set() de-duplicates tags; note tag order is not preserved.
        tags=list(set(_tags)),
        message_type='microservice')
    self.logger.addHandler(_handler)
def configure_logging(app):
    """Replace *app*'s logger with a DEBUG-level logstash-backed one.

    The logstash host is read from the LOGSTASH_HOST environment variable;
    the port is fixed at 5959 and records go through LogstashFormatter.
    """
    log = logging.getLogger("python-logstash-logger")
    log.setLevel(logging.DEBUG)
    handler = logstash.TCPLogstashHandler(os.environ.get('LOGSTASH_HOST'),
                                          5959, version=1)
    handler.setFormatter(LogstashFormatter())
    log.addHandler(handler)
    app.logger = log
    app.logger.info('Logging setup complete')
def initialize(level=None, log_filepath=None, log_filename=None, use_logstash=None):
    """Initialize root logging for mediatum (Python 2 code: note the print
    statement below).

    Console output is always configured; logstash and file output are
    optional, driven by the ``logging.*`` config section unless overridden
    by the arguments.

    Args:
        level: explicit logging level; falls back to config 'logging.level'.
        log_filepath: explicit log file path; falls back to config.
        log_filename: file name used when only 'logging.dir' is configured.
        use_logstash: override for config 'logging.use_logstash'.
    """
    root_logger = logging.getLogger()
    if level is None:
        levelname = config.get('logging.level', "INFO")
        try:
            level = getattr(logging, levelname.upper())
        except:
            # Unknown level name: warn on stdout and leave ``level`` as None
            # (setLevel(None) below then keeps the logger's default).
            print "unknown loglevel specified in logging config:", levelname
    root_logger.setLevel(level)
    stream_handler = ConsoleHandler()
    stream_handler.setFormatter(logging.Formatter(ROOT_STREAM_LOGFORMAT))
    # Drop any pre-existing handlers so repeated initialization does not
    # duplicate output.
    root_logger.handlers = []
    root_logger.addHandler(stream_handler)
    if use_logstash is None:
        use_logstash = config.get('logging.use_logstash', "true") == "true"
    if use_logstash:
        logstash_handler = logstash.TCPLogstashHandler("localhost", 5959, version=1, message_type="mediatum")
        root_logger.addHandler(logstash_handler)
    if log_filepath is None:
        log_filepath = config.get('logging.file', None)
    if log_filepath is None:
        log_dir = config.get("logging.dir", None)
        if log_dir:
            if not log_filename:
                # use name of start script as log file name
                log_filename = os.path.basename(
                    os.path.splitext(sys.argv[0])[0]) + ".log"
            log_filepath = os.path.join(log_dir, log_filename)
    if log_filepath:
        # Register the active log file so other components can find it.
        dlogfiles['mediatum'] = {
            'path': log_filepath,
            'filename': log_filepath
        }
        file_handler = logging.FileHandler(log_filepath)
        file_handler.setFormatter(logging.Formatter(ROOT_FILE_LOGFORMAT))
        root_logger.addHandler(file_handler)
        logg.info('--- logging everything to %s ---', log_filepath)
def set_logstash(self, logger, logstash_host=None, logstash_port=None):
    """Optionally attach a TCP logstash handler to *logger*.

    When *logstash_host* is given, the handler is attached and a
    LoggerAdapter tagging records with origin 'cdici_dispatcher' is
    returned; otherwise *logger* is returned unchanged.
    """
    if logstash_host is None:
        return logger
    logger.addHandler(
        logstash.TCPLogstashHandler(logstash_host, logstash_port))
    return logging.LoggerAdapter(logger, {'origin': 'cdici_dispatcher'})
def wrappedF(*args, **kwargs):
    """Decorator wrapper: builds a logger from ``self.specs`` (file, stdout
    and/or logstash handlers as configured), then calls the wrapped function
    *f* with the logger prepended to its arguments, logging wall-clock
    duration around the call.

    Returns:
        Whatever *f* returns.
    """
    # Generate a logger ...
    logger = logging.getLogger(self.base)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # Timestamp reused for the log file name and as a logstash tag.
    now = dt.now().strftime('%Y-%m-%d_%H-%M-%S')
    # Generate a file handler if necessary
    if ('file' in self.specs) and self.specs['file']['todo']:
        fH = logging.FileHandler('{}/{}.log'.format(
            self.specs['file']['logFolder'], now))
        fH.setFormatter(formatter)
        logger.addHandler(fH)
    # Generate a stdout handler if necessary
    if ('stdout' in self.specs) and self.specs['stdout']['todo']:
        cH = logging.StreamHandler(sys.stdout)
        cH.setFormatter(formatter)
        logger.addHandler(cH)
    # Generate a logstash handler if necessary
    if ('logstash' in self.specs) and self.specs['logstash']['todo']:
        # Tag every record with a fixed tag, this run's timestamp and any
        # extra configured tags.
        tags = ['example1', now]
        if 'tags' in self.specs['logstash']:
            tags += self.specs['logstash']['tags']
        lH = logstash.TCPLogstashHandler(
            host=self.specs['logstash']['host'],
            port=self.specs['logstash']['port'],
            version=self.specs['logstash']['version'],
            tags=tags)
        logger.addHandler(lH)
    # set the level of the handler
    logger.setLevel(self.logLevel)
    logger.info('Starting the main program ...')
    t0 = time()
    result = f(logger, *args, **kwargs)
    logger.info(
        'Finished the main program in {:.6e} seconds'.format(time() - t0))
    return result
def get_logger():
    """Return the "Server" logger at DEBUG level, wired to the remote
    logstash TCP endpoint at 34.94.244.167:7000."""
    server_logger = logging.getLogger("Server")
    server_logger.setLevel(logging.DEBUG)
    handler = logstash.TCPLogstashHandler('34.94.244.167', 7000, version=1)
    server_logger.addHandler(handler)
    return server_logger
def get_logger(self):
    """Return the root logger at INFO level.

    Handlers (logstash TCP + console) are attached only when the logger has
    none yet, so repeated calls never duplicate output.
    """
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    if root.hasHandlers():
        return root
    root.addHandler(
        logstash.TCPLogstashHandler(logstash_conf["host"],
                                    logstash_conf["port"],
                                    version=1))
    root.addHandler(logging.StreamHandler())
    return root
def __init__(
    self,
    team_id,
    execution_id,
    script_id,
    service_id,
    service_name,
    script_image_path,
    script_type,
    script_name,
    ip,
    port,
    db_client,
    tick_id,
    delay=0,
    setflag_lock=None,
):
    """Thread that runs one script execution against one team's service.

    Stores all identifying metadata for the execution, initializes the
    result bookkeeping and wires a logstash-backed logger.

    Args:
        team_id/execution_id/script_id/service_id/tick_id: identifiers for
            this execution within the game database.
        service_name, script_image_path, script_type, script_name: what to
            run and how it is labeled.
        ip, port: target endpoint of the team's service.
        db_client: database client used during execution.
        delay: seconds to wait before starting (default 0).
        setflag_lock: optional lock serializing setflag operations.
    """
    # Initialize thread stuff
    threading.Thread.__init__(self)
    # Make instance vars out of arguments
    self.delay = delay
    self.setflag_lock = setflag_lock
    self.execution_id = execution_id
    self.ip = ip
    self.port = port
    self.db = db_client
    self.team_id = team_id
    self.script_id = script_id
    self.service_id = service_id
    self.service_name = service_name
    self.script_image_path = script_image_path
    self.script_type = script_type
    self.script_name = script_name
    self.tick_id = tick_id
    # More instance vars
    self.flag_meta = {}
    # Result starts in a sentinel "Init" state until execution updates it.
    self.result = {'error': 0, 'error_msg': 'Init'}
    # Cap on how much script output is shipped onward.
    self.max_output_send_bytes = settings.MAX_SCRIPT_OUTPUT_BYTES
    # Set logger
    self.log = logging.getLogger('scriptbot.script_exec')
    self.log.setLevel(settings.LOG_LEVEL)
    self.log.addHandler(
        logstash.TCPLogstashHandler(LOGSTASH_IP, LOGSTASH_PORT, version=1))
    self.log.info('ScriptThread Init')
def __init__(self, registry_username=settings.REGISTRY_USERNAME, registry_password=settings.REGISTRY_PASSWORD, registry_endpoint=settings.REGISTRY_ENDPOINT): self.registry_username = registry_username # FIXME: The login token will expire! we need to refresh it if we want a game that # last more than 8 hours. Getting a token requires AWS APIs though and # we don't want a script to be bounded by the time this takes. self.registry_password = registry_password self.registry_endpoint = registry_endpoint self.docker_client = docker.from_env() self.log = logging.getLogger('scriptbot.registryClient') self.log.addHandler(logstash.TCPLogstashHandler(LOGSTASH_IP, LOGSTASH_PORT, version=1)) if not settings.IS_LOCAL_REGISTRY: self._authenticate()