def set_logging_level(level: Union[str, int] = None, logger='colfixer'):
    """
    Set the handler level for ``logger`` (default: ``colfixer``), attach a console handler, and return
    the configured :class:`logging.Logger`. If no level is given, it's derived from settings / environment.
    """
    global log
    if empty(level):
        level = 'ERROR' if settings.QUIET else env('LOG_LEVEL', ('DEBUG' if settings.DEBUG else 'WARNING'))
    if isinstance(level, str):
        level = logging.getLevelName(level)
    _lh = LogHelper(logger, handler_level=level)
    _lh.add_console_handler()
    if logger == 'colfixer':
        log = _lh.get_logger()
    return _lh.get_logger()
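# Illustrative usage sketch - an assumption, not part of the original module. With no arguments,
# set_logging_level() derives the level from settings.QUIET / settings.DEBUG and the LOG_LEVEL
# environment variable; an explicit level overrides that fallback chain, and a different `logger`
# name (here 'someapp', a hypothetical example) configures that logger instead of 'colfixer'.
#
#     set_logging_level('DEBUG')                                  # verbose output on 'colfixer'
#     applog = set_logging_level(logging.INFO, logger='someapp')  # hypothetical extra logger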
def config_logger(*logger_names, log_dir=BASE_LOG_FOLDER):
    """
    Used to allow isolated parts of this project to easily change the log output folder,
    e.g. allow Django management commands to change the logs folder to ``crons/``

    Currently only used by :class:`payments.management.CronLoggerMixin`

    Usage:

        >>> config_logger('someapp', 'otherlogger', 'mylogger', log_dir='/full/path/to/log/folder')

    :param str logger_names: List of logger names to replace logging config for (see LOGGER_NAMES)
    :param str log_dir: Fully qualified path. Set each logger's timed_file log directory to this
    :return: :class:`logging.Logger` instance of BASE_LOGGER
    """
    _lh = LogHelper(BASE_LOGGER, formatter=LOG_FORMATTER, handler_level=logging.DEBUG)
    _lh.log.handlers.clear()                          # Force reset the handlers on the base logger to avoid double/triple logging.
    _lh.add_console_handler(level=CONSOLE_LOG_LEVEL)  # Log to console with CONSOLE_LOG_LEVEL

    _dbg_log = os.path.join(log_dir, 'debug.log')
    _err_log = os.path.join(log_dir, 'error.log')
    _lh.add_timed_file_handler(_dbg_log, when='D', interval=1, backups=14, level=DBGFILE_LEVEL)
    _lh.add_timed_file_handler(_err_log, when='D', interval=1, backups=14, level=ERRFILE_LEVEL)

    l = _lh.get_logger()

    # Use the same logging configuration for all privex modules
    _lh.copy_logger(*logger_names)

    return l
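# Hedged sketch only (the directory name below is an assumption, not taken from the project):
# roughly how a CronLoggerMixin-style management command could redirect all configured loggers
# into a crons/ sub-folder using config_logger() above. LOGGER_NAMES is referenced in the
# docstring; os and BASE_LOG_FOLDER are already available in this module.
#
#     cron_dir = os.path.join(BASE_LOG_FOLDER, 'crons')
#     os.makedirs(cron_dir, exist_ok=True)
#     log = config_logger(*LOGGER_NAMES, log_dir=cron_dir)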
def _setup_logging(level=logging.WARNING):
    """
    Set up logging for the entire module ``privex.rpcemulator``. Since this is a package, we don't add
    any console or file logging handlers - we simply set our minimum logging level to WARNING to avoid
    spamming the logs of any application importing it.
    """
    lh = LogHelper(__name__, level=level)
    return lh.get_logger()
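# Hedged sketch (not part of the package): since _setup_logging() deliberately attaches no handlers,
# an application that imports privex.rpcemulator and wants to see its output can attach its own
# console handler via LogHelper, mirroring the pattern used in the other snippets here.
#
#     from privex.loghelper import LogHelper
#     _lh = LogHelper('privex.rpcemulator', handler_level=logging.DEBUG)
#     _lh.add_console_handler()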
def main():
    global log
    try:
        vargs = parser.parse_args()
    except Exception as e:
        parser.error(f"{type(e)} - {str(e)}")
        return sys.exit(1)

    if vargs.verbose_mode:
        _lh2 = LogHelper('privex.cspgen', handler_level=logging.DEBUG)
        _lh2.add_console_handler(stream=sys.stderr)
        log = _lh2.get_logger()
    log.debug(f"parser args: {vargs!r}")

    if vargs.show_version:
        oprint(COPYRIGHT)
        return COPYRIGHT

    if vargs.show_example:
        exfile, expath = read_example_file()
        exnote = "#####", "#", "# Privex CSPGen example.ini file", f"# Original Location within Python Package: {expath}", "#", "#####\n"
        oprint(*exnote, exfile, *exnote, sep="\n")
        return sys.exit(0)

    filenames = vargs.filenames
    file_sep, sec_sep = literal(vargs.file_sep), literal(vargs.section_sep)
    str_secs = []
    list_secs = []

    if empty(filenames, itr=True):
        if sys.stdin.isatty():
            parser.error("No filenames specified, and no data piped to stdin")
            return sys.exit(1)
        log.debug("Assuming config piped via STDIN. Reading config from stdin.")
        confd = read_stdin()
        builder = get_builder(contents=confd)
        str_secs += [builder.generate('string', sep=sec_sep)]
        list_secs += [builder.generate('list')]
    else:
        for fn in filenames:
            if fn in ['-', '/dev/stdin', 'STDIN']:
                log.debug("Assuming config piped via STDIN. Reading config from stdin.")
                builder = get_builder(contents=read_stdin())
            else:
                builder = get_builder(fn)
            str_secs += [builder.generate('string', sep=sec_sep)]
            list_secs += [builder.generate('list')]

    # oprint('file_sep: ', repr(file_sep))
    # oprint('sec_sep: ', repr(sec_sep))
    oprint(file_sep.join(str_secs))
    return list_secs, str_secs
def _setup_logging(level=logging.WARNING):
    """
    Set up logging for the entire module ``privex.eos``. Since this is a package, we don't add any
    console or file logging handlers - we simply set our minimum logging level to WARNING to avoid
    spamming the logs of any application importing it.
    """
    try:
        from privex.loghelper import LogHelper
        lh = LogHelper(__name__, level=level)
        return lh.get_logger()
    except ImportError:
        warnings.warn(f'{__name__} failed to import privex.loghelper. Logging may not work as expected.')
        # Fall back to a plain stdlib logger if privex.loghelper isn't installed.
        lh = logging.getLogger(__name__)
        lh.setLevel(logging.WARNING)
        return lh
from privex.db.sqlite import SqliteAsyncWrapper

try:
    dotenv.read_dotenv()
except AttributeError:
    dotenv.load_dotenv()

LOG_LEVEL = env('LOG_LEVEL')
LOG_LEVEL = logging.getLevelName(str(LOG_LEVEL).upper()) if LOG_LEVEL is not None else logging.WARNING
_setup_logging(LOG_LEVEL)
LOG_FORMATTER = logging.Formatter('[%(asctime)s]: %(name)-55s -> %(funcName)-20s : %(levelname)-8s:: %(message)s')

_lh = LogHelper('privex.db.tests', handler_level=LOG_LEVEL, formatter=LOG_FORMATTER)
_lh.copy_logger('privex.db')
_lh.add_console_handler()
log = _lh.get_logger()


class PrivexTestBase(TestCase):
    pass


class PrivexDBTestBase(PrivexTestBase):
    """
    Base class for all privex-db test classes. Includes :meth:`.tearDown` to reset database after each test.
    """

    def setUp(self) -> None:
        self.wrp = ExampleWrapper()

    def tearDown(self) -> None:
app = Flask(__name__)
CORS(app)
cf = app.config
app.url_map.strict_slashes = False

for k, v in settings.cf.items():
    cf[k] = v

# if empty(LOG_LEVEL):
#     LOG_LEVEL = logging.DEBUG if DEBUG else logging.INFO

lh = LogHelper('myip')

if settings.USE_RICH_LOGGING:
    lh.get_logger().addHandler(
        RichHandler(level=settings.LOG_LEVEL, console=console_err, rich_tracebacks=settings.RICH_TRACEBACKS)
    )
else:
    lh.add_console_handler(level=settings.LOG_LEVEL, stream=sys.stderr)

lh.add_timed_file_handler(settings.DBG_LOG, when='D', interval=1, backups=14, level=settings.LOG_LEVEL)
lh.add_timed_file_handler(settings.ERR_LOG, when='D', interval=1, backups=14, level=logging.WARNING)
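# Hedged note (not from the original app): the handlers attached to the 'myip' logger above are
# inherited by child loggers through normal propagation, so other modules can simply call
# logging.getLogger('myip.<module>') - or LogHelper('myip').get_logger() - and their messages
# will reach the console/Rich handler plus both timed file handlers.
#
#     log = logging.getLogger('myip.views')   # 'views' is a hypothetical module name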
def handle(self, *args, **options):
    _lh = LogHelper(__name__, formatter=LOG_FORMATTER, handler_level=logging.INFO)
    _lh.add_console_handler()
    _lh.get_logger().propagate = False

    lockmgr.clean_locks()   # Clean up any locks due for expiration.

    fail = is_true(options['fail'])
    no_renew = is_true(options['no_renew'])
    only_renew = is_true(options['only_renew'])
    no_timeout = is_true(options['no_timeout'])
    locks: list = options['locks']
    process_id: int = int(options['process_id']) if options['process_id'] is not None else None
    locked_by: str = options['locked_by']
    timeout = None if no_timeout else int(options['timeout'])
    lock_args = dict(expires=timeout, locked_by=locked_by, lock_process=process_id)

    if len(locks) == 0:
        print('No lock names specified.')
        return

    _create = False if only_renew else True
    _renew = False if no_renew else True

    try:
        res = set_lock(
            *locks, timeout=timeout, locked_by=locked_by, process_id=process_id,
            fail=fail, create=_create, renew=_renew
        )
        print(f"Finished creating / renewing {len(locks)} locks.\n")
        print("\n====================Status Report=====================\n")
        print(f" Per-lock:\n")
        print("\t\t{:<20}{:<20}{:<20}{:<20}\n".format("Name", "Was Locked?", "Now Locked?", "Status"))
        for lck_name, lres in res.statuses:
            print("\t\t{:<20}{:<20}{:<20}{:<20}".format(
                lck_name, 'YES' if lres.was_locked else 'NO', 'YES' if lres.locked else 'NO', lres.status
            ))
        print("\n========================================================\n")
        print(" Summary:\n")
        print(f" Locks Created: {res.counts['created']}")
        print(f" Locks Renewed: {res.counts['renewed']}")
        print(f" Renewals Skipped: {res.counts['skip_renew']}")
        print(f" Creations Skipped: {res.counts['skip_create']}")
    except LockFail as e:
        print("\n---------------------------------------------------------------------------\n")
        print(" [lockmgr.management.commands.set_lock] Caught exception LockFail while creating/setting locks...")
        print(" [lockmgr.management.commands.set_lock] The following existing lock was encountered:\n")
        print(f"\t{e.lock}\n")
        print(" >>> As you have set -e / --fail, this means that any lock creations or updates triggered during "
              "this run of set_lock should have been rolled back.")
        print(" >>> If in doubt, run './manage.py list_locks' to view all current locks.\n")
        print(" !!! Now exiting with return code 2...\n")
        return sys.exit(2)

    print("")
    print("\n=========================================================\n")
    print("Finished creating / renewing locks.")
    print("\n=========================================================\n")
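# Hedged usage note (an assumption - the exact option names are defined by this command's
# add_arguments(), which isn't shown here; only '-e / --fail' and './manage.py list_locks'
# are confirmed by the messages above). An invocation would look roughly like:
#
#     ./manage.py set_lock somelock otherlock --timeout 600 -e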
        signatures=[
            '1f1a0212f7b9fe263acaeadf1ec127000dc234c413b543e3c268d251e8d8205b95746f3aa102805eb85d5ee72bf1c80b7' +
            '14fadf081d29138a6ddab085dafa28604'
        ]),
        'txid': 'c901c52daf57b60242d9d7be67f790e023cf2780',
    }
]

IGNORE_KEYS_FIND = ['transaction_id', 'block_num', 'transaction_num']

DEBUG = env_bool('DEBUG', False)

lh = LogHelper('golos', handler_level=logging.DEBUG if DEBUG else logging.CRITICAL)
lh.add_console_handler()
log = lh.get_logger()


class GolosTestCase(unittest.TestCase):
    def setUp(self):
        self.golos = Api(nodes=NODES, report=DEBUG)

    def test_get_account(self):
        """Testing Api.get_accounts returns valid account dictionaries"""
        a = self.golos.get_accounts(TEST_ACCOUNTS)
        self.assertIs(type(a), list)
        self.assertEqual(len(a), len(TEST_ACCOUNTS))

        for i, acc in enumerate(a):
            # log.info('get_accounts %s = %s', acc, a)
            self.assertIs(type(acc), dict, msg=f'type(acc) is dict')
            self.assertIn('name', acc, msg=f"'name' in acc")
def _setup_logging(level=logging.WARNING):
    lh = LogHelper(__name__, level=level)
    return lh.get_logger()
def set_log_level(level: Union[str, int]) -> logging.Logger:
    """Configure the ``steempeers`` logger with a console handler at ``level`` and return it."""
    level = logging.getLevelName(str(level).upper()) if isinstance(level, str) else level
    _lh = LogHelper('steempeers', handler_level=level)
    _lh.add_console_handler()
    return _lh.get_logger()
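# Hedged usage sketch (an assumption, not from the original script): set_log_level() accepts either
# a level name or a numeric logging constant, so both of these configure the 'steempeers' logger
# with a console handler at DEBUG.
#
#     log = set_log_level('debug')
#     log = set_log_level(logging.DEBUG)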