def initalise_logger(filename, print_level="INFO", file_level="DEBUG"):
    """
    Sets up (possibly multiprocessing aware) logging.

    :param filename: Where to save the logs to; ``None`` disables file output
    :param print_level: What level of logging to print to console.
        Default: 'INFO'
    :param file_level: What level of logging to print to file.
        Default: 'DEBUG'
    :return: the configured root logger
    """
    root = logging.getLogger()
    # Root level must be the most verbose of the two so records reach
    # both handlers; each handler then filters on its own level.
    root.setLevel(getattr(logging, file_level))

    fmt = logging.Formatter(
        "%(asctime)s - %(levelname)s"
        " - %(processName)s %(filename)s:%(lineno)s"
        " - %(message)s"
    )
    fmt.datefmt = "%Y-%m-%d %H:%M:%S %p"

    # Optional file handler, only attached when a filename is supplied.
    if filename is not None:
        file_handler = logging.FileHandler(filename)
        file_handler.setLevel(getattr(logging, file_level))
        file_handler.setFormatter(fmt)
        root.addHandler(file_handler)

    # Console output goes through rich for colored, pretty formatting.
    console_handler = RichHandler()
    console_handler.setLevel(getattr(logging, print_level))
    console_handler.setFormatter(fmt)
    root.addHandler(console_handler)
    return root
def verbosity(self, level: Union[str, int]):
    """
    Sets logging configuration for scvi based on chosen level of verbosity.

    Sets "scvi" logging level to `level`
    If "scvi" logger has no StreamHandler, add one.
    Else, set its level to `level`.
    """
    # Remember the chosen verbosity on the instance, then apply it to the
    # shared module-level "scvi" logger.
    self._verbosity = level
    scvi_logger.setLevel(level)
    has_streamhandler = False
    # Re-level any existing rich handler instead of adding a second one.
    # NOTE(review): the log message says "StreamHandler" but the check is
    # for RichHandler — presumably intentional since RichHandler plays the
    # console-stream role here; confirm against the rest of the package.
    for handler in scvi_logger.handlers:
        if isinstance(handler, RichHandler):
            handler.setLevel(level)
            logger.info(
                "'scvi' logger already has a StreamHandler, set its level to {}."
                .format(level))
            has_streamhandler = True
    if not has_streamhandler:
        # No console handler yet: build one. force_terminal keeps rich
        # colors even when stdout is not a TTY; jupyter rendering is
        # disabled so output stays plain text.
        console = Console(force_terminal=True)
        if console.is_jupyter is True:
            console.is_jupyter = False
        ch = RichHandler(show_path=False, console=console, show_time=False)
        formatter = logging.Formatter("%(message)s")
        ch.setFormatter(formatter)
        scvi_logger.addHandler(ch)
        logger.debug(
            "Added StreamHandler with custom formatter to 'scvi' logger.")
def verbosity(self, level: Union[str, int]):
    """
    Sets logging configuration for scvi based on chosen level of verbosity.

    Sets the "scvi" logger's level to `level`. If the logger has no
    handler yet, a RichHandler emitting plain messages is attached.

    Parameters
    ----------
    level
        Sets "scvi" logging level to `level` (name or numeric value).
    """
    self._verbosity = level
    scvi_logger.setLevel(level)
    if len(scvi_logger.handlers) == 0:
        # force_terminal keeps rich colors even when stdout is not a TTY;
        # jupyter rendering is disabled so output stays plain text.
        console = Console(force_terminal=True)
        if console.is_jupyter is True:
            console.is_jupyter = False
        ch = RichHandler(show_path=False, console=console, show_time=False)
        formatter = logging.Formatter("%(message)s")
        ch.setFormatter(formatter)
        scvi_logger.addHandler(ch)
    # The previous version re-applied setLevel in an else branch; that was
    # redundant because the level is already set unconditionally above.
    # The docstring also advertised a nonexistent `force_terminal`
    # parameter; both have been removed.
def app(ctx, strategy, verbose): """ Automate and mantain a cryptocurrency-based portfolio tracking the market index. STRATEGY: path to the .toml strategy file - see README for more info Run the script without any commands to start an interactive shell. """ # configure logging for the application log = logging.getLogger() log.setLevel(logging.INFO if not verbose else logging.DEBUG) rich_handler = RichHandler() rich_handler.setFormatter( logging.Formatter(fmt="%(message)s", datefmt="[%X]")) log.addHandler(rich_handler) log.propagate = False # initialise application data = toml.load(strategy) if not validate_strategy(data): sys.exit() currency = data["currency"] portfolio = Portfolio(data["portfolio"], currency) exchange_platform = data["exchange"]["platform"] exchange = EXCHANGES[exchange_platform](data["exchange"]["key"], data["exchange"]["secret"]) with console.status("[bold green]Connecting to exchange..."): portfolio.connect(exchange) ctx.obj = State(portfolio, exchange, currency)
def test_exception():
    """Verify RichHandler renders formatter output and rich tracebacks."""
    # Capture rendered output in a StringIO console with a fixed width so
    # assertions are deterministic.
    console = Console(
        file=io.StringIO(),
        force_terminal=True,
        width=140,
        color_system=None,
        _environ={},
    )
    handler_with_tracebacks = RichHandler(console=console,
                                          enable_link_path=False,
                                          rich_tracebacks=True)
    formatter = logging.Formatter("FORMATTER %(message)s %(asctime)s")
    handler_with_tracebacks.setFormatter(formatter)
    log.addHandler(handler_with_tracebacks)
    log.error("foo")
    # Trigger a real exception so log.exception records a traceback.
    try:
        1 / 0
    except ZeroDivisionError:
        log.exception("message")
    render = handler_with_tracebacks.console.file.getvalue()
    print(render)
    # Both the formatted message and the rich-rendered traceback details
    # must appear in the captured output.
    assert "FORMATTER foo" in render
    assert "ZeroDivisionError" in render
    assert "message" in render
    assert "division by zero" in render
def get_crystals(self):
    """Discover and load all kyber crystal plugins under crystal_location,
    wiring each one with a per-crystal logger, filter and rich handler."""
    log_filter = KyberContextFilter()
    handler = RichHandler()
    handler.setFormatter(
        logging.Formatter(
            "[{name}] Agent: {agent} User: {agent_username} => {message}",
            style="{"))
    for root, _, files in os.walk(self.crystal_location):
        for crystal in files:
            # NOTE(review): `root` from os.walk already contains the
            # crystal_location prefix, so `crystal_location / root` only
            # works when crystal_location is absolute (an absolute right
            # operand replaces the left one in Path division) — confirm
            # crystal_location is always absolute.
            crystal_file = self.crystal_location / root / crystal
            # Only real plugin modules: skip the example, dunder files
            # and package __init__.py.
            if (crystal_file.suffix == ".py"
                    and not crystal_file.stem == "example"
                    and not crystal_file.stem.startswith("__")
                    and crystal_file.name != "__init__.py"):
                try:
                    c = self.load(crystal_file)
                    c.deathstar = self.deathstar
                    # Give each crystal its own non-propagating logger so
                    # its output carries the crystal-specific context.
                    c.log = logging.getLogger(
                        f"deathstar.kybercrystals.{crystal_file.stem}")
                    c.log.propagate = False
                    c.log.addHandler(handler)
                    c.log.addFilter(log_filter)
                    self.loaded.append(c)
                except Exception as e:
                    # Best-effort loading: a broken crystal is reported
                    # but does not abort discovery of the others.
                    log.error(f'Failed loading "{crystal_file}": {e}')
    log.debug(f"Loaded {len(self.loaded)} kyber crystal(s)")
def setup():
    """Raise the recursion limit and route root logging through rich."""
    # Deep recursion is expected here; the interpreter default is too low.
    sys.setrecursionlimit(2000)  # Default is ~900

    handler = RichHandler()
    handler.setFormatter(Formatter("%(message)s"))
    basicConfig(level="DEBUG", datefmt="[%X]", handlers=[handler])
def _setup_logging(
    verbosity: int,
    log_file: Optional[str] = None,
):
    """Configure root logging for the application.

    :param verbosity: console verbosity; negative silences console output,
        0 -> WARNING, 1 -> INFO, >= 2 -> DEBUG.
    :param log_file: optional path; when given, all records (the root
        logger runs at DEBUG) are also written there in a detailed format.
    """
    default_log_format = (
        "%(asctime)s [%(levelname)s](%(name)s:%(funcName)s:%(lineno)d): %(message)s"
    )
    logger = logging.getLogger("")  # get root logger
    # Root stays at DEBUG; each handler filters down to its own level.
    logger.setLevel(logging.DEBUG)
    default_formatter = logging.Formatter(fmt=default_log_format, datefmt="%X")
    if log_file:
        log_file_path = Path(log_file).resolve()
        # exist_ok makes this race-free; no prior exists() check needed.
        log_file_path.parent.mkdir(parents=True, exist_ok=True)
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(default_formatter)
        logger.addHandler(file_handler)
    if verbosity < 0:
        # Negative verbosity: no console handler at all (the NullHandler
        # also suppresses the "no handlers" warning when no file is set).
        logger.addHandler(logging.NullHandler())
    else:
        # Map verbosity count to a console log level.
        if verbosity >= 2:
            level = logging.DEBUG
        elif verbosity == 1:
            level = logging.INFO
        else:
            level = logging.WARNING
        console_handler = RichHandler(rich_tracebacks=True,
                                      log_time_format="[%X]",
                                      console=console)
        console_handler.setLevel(level)
        console_handler.setFormatter(logging.Formatter("%(message)s"))
        logger.addHandler(console_handler)
def cli(
    ctx,
    config,
    log_path,
    file_log_level,
    verbose,
    debug,
    debug_filter,
    quiet,
    pynetdicom_log_level,
):
    """High level DICOM file and network operations"""
    # --quiet contradicts --verbose/--debug; reject the combination early.
    if quiet:
        if verbose or debug:
            cli_error("Can't mix --quiet with --verbose/--debug")

    # Create Rich Console outputing to stderr for logging / progress bars
    rich_con = Console(stderr=True)

    # Setup logging: root logger collects everything at DEBUG and the
    # individual handlers filter per destination.
    LOG_FORMAT = "%(asctime)s %(levelname)s %(threadName)s %(name)s %(message)s"
    def_formatter = logging.Formatter(LOG_FORMAT)
    root_logger = logging.getLogger("")
    root_logger.setLevel(logging.DEBUG)
    # pynetdicom is chatty; its level is controlled independently.
    pynetdicom_logger = logging.getLogger("pynetdicom")
    pynetdicom_logger.setLevel(getattr(logging, pynetdicom_log_level))
    # Console format is shorter: rich already renders the time/level.
    stream_formatter = logging.Formatter("%(threadName)s %(name)s %(message)s")
    stream_handler = RichHandler(console=rich_con, enable_link_path=False)
    stream_handler.setFormatter(stream_formatter)
    # logging.getLogger("asyncio").setLevel(logging.DEBUG)
    # Map the mutually-refining verbosity flags onto the console handler.
    if debug:
        stream_handler.setLevel(logging.DEBUG)
    elif verbose:
        stream_handler.setLevel(logging.INFO)
    elif quiet:
        stream_handler.setLevel(logging.ERROR)
    else:
        stream_handler.setLevel(logging.WARN)
    root_logger.addHandler(stream_handler)
    handlers = [stream_handler]
    # Optional file logging with its own level and the detailed format.
    if log_path is not None:
        file_handler = logging.FileHandler(log_path)
        file_handler.setFormatter(def_formatter)
        file_handler.setLevel(getattr(logging, file_log_level))
        root_logger.addHandler(file_handler)
        handlers.append(file_handler)
    # Apply any named debug filters to every active handler; unknown
    # filter names abort with a CLI error.
    if len(debug_filter) > 0:
        for filter_name in debug_filter:
            if filter_name not in debug_filters:
                cli_error("Unknown debug filter: %s" % filter_name)
            for handler in handlers:
                handler.addFilter(debug_filters[filter_name])

    # Create global param dict for subcommands to use
    ctx.obj = {}
    ctx.obj["config_path"] = config
    ctx.obj["config"] = DcmConfig(config, create_if_missing=True)
    ctx.obj["rich_con"] = rich_con
def setup_logger(level="INFO", logfile=None):
    """Configure the root logger to emit through RichHandler.

    The same messages go to stdout and, when *logfile* is given, to that
    file as well. *level* must be "INFO" or "DEBUG"; writing a log file
    is optional.
    """
    logger = logging.getLogger()

    # Validate the requested verbosity before touching any handler.
    allowed = ["INFO", "DEBUG"]
    if level not in allowed:
        raise ValueError(
            "Passed wrong level for the logger. Allowed levels are: {}".format(
                ', '.join(allowed)))
    logger.setLevel(getattr(logging, level))

    plain = logging.Formatter("%(message)s")

    # Console output.
    console_handler = RichHandler(show_time=False, rich_tracebacks=True)
    console_handler.setFormatter(plain)
    logger.addHandler(console_handler)

    # Optional file output, rendered through a rich Console bound to the
    # opened file object.
    if logfile:
        file_handler = RichHandler(show_time=False,
                                   rich_tracebacks=True,
                                   console=Console(file=open(logfile, "wt")))
        file_handler.setFormatter(plain)
        logger.addHandler(file_handler)
    return logger
def configure_logging(config):
    """Configure the root logger based on user config."""
    rootlogger = logging.getLogger()
    # Start from a clean slate: drop any handlers left from earlier runs.
    while rootlogger.handlers:
        rootlogger.handlers.pop()

    # Resolve the log file path: an explicit path is user-expanded, an
    # explicitly falsy path disables file logging, and a missing key
    # falls back to the default filename.
    try:
        if config["path"]:
            logfile_path = os.path.expanduser(config["path"])
        else:
            logfile_path = config["path"]
    except KeyError:
        logfile_path = DEFAULT_LOG_FILENAME

    if logfile_path:
        logdir = os.path.dirname(os.path.realpath(logfile_path))
        if not os.path.isdir(logdir):
            os.makedirs(logdir)

    log_level = get_logging_level(config.get("level", "info"))
    rootlogger.setLevel(log_level)

    formatter_str = set_formatter_string(config)
    formatter = logging.Formatter(formatter_str)
    handler = None

    # Console handler selection: rich unless explicitly disabled; a plain
    # StreamHandler when "console" is requested; the choices below may
    # overwrite `handler`, the last one wins.
    if config.get("rich") is not False:
        handler = RichHandler(
            rich_tracebacks=True,
            show_time=config.get("timestamp", True),
            show_path=config.get("extended", True),
        )

    if logfile_path:
        # File logging rotates at a configurable size (default 50 MB).
        file_handler = RotatingFileHandler(
            logfile_path, maxBytes=config.get("file-size", 50e6)
        )
        file_handler.setLevel(log_level)
        file_handler.setFormatter(formatter)
        rootlogger.addHandler(file_handler)

    if config.get("console"):
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)

    # If we still don't have the handler, we are assuming that
    # the user wants to switch off logging, let's log only
    # Critical errors
    if not handler:
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        log_level = get_logging_level("critical")

    # Optional message filtering on the console handler only.
    if config.get("filter") and handler:
        handler.addFilter(ParsingFilter(config, config["filter"]))

    if handler:
        handler.setLevel(log_level)
        rootlogger.addHandler(handler)

    _LOGGER.info("=" * 40)
    _LOGGER.info(_("Started opsdroid %s."), __version__)
def __init__(self, work_dir=DEFAULT_WORK_DIR, logfile_name='log.txt', logger_name='logger'):
    """Initializes the logger.

    Args:
        work_dir: The work directory. (default: DEFAULT_WORK_DIR)
        logfile_name: Name of the log file. (default: `log.txt`)
        logger_name: Unique name for the logger. (default: `logger`)

    Raises:
        SystemExit: If a logger with `logger_name` already has handlers,
            to avoid two wrappers writing through the same logger.
    """
    self.logger = logging.getLogger(logger_name)
    if self.logger.hasHandlers():  # Already existed
        raise SystemExit(
            f'Logger `{logger_name}` has already existed!\n'
            f'Please use another name, or otherwise the '
            f'messages from these two logger may be mixed up.')

    # Logger itself accepts everything; the handlers below filter.
    self.logger.setLevel(logging.DEBUG)

    # Print log message with `INFO` level or above onto the screen.
    terminal_console = Console(file=sys.stderr,
                               log_time=False,
                               log_path=False)
    terminal_handler = RichHandler(level=logging.INFO,
                                   console=terminal_console,
                                   show_time=True,
                                   show_level=True,
                                   show_path=False)
    terminal_handler.setFormatter(logging.Formatter('%(message)s'))
    self.logger.addHandler(terminal_handler)

    # Save log message with all levels into log file if needed.
    # The file is opened in append mode so restarts keep prior history.
    if logfile_name:
        os.makedirs(work_dir, exist_ok=True)
        file_stream = open(os.path.join(work_dir, logfile_name), 'a')
        file_console = Console(file=file_stream,
                               log_time=False,
                               log_path=False)
        file_handler = RichHandler(level=logging.DEBUG,
                                   console=file_console,
                                   show_time=True,
                                   show_level=True,
                                   show_path=False)
        file_handler.setFormatter(logging.Formatter('%(message)s'))
        self.logger.addHandler(file_handler)

    # Re-export the underlying logger's methods as instance attributes
    # so callers can use this object like a plain logger.
    self.log = self.logger.log
    self.debug = self.logger.debug
    self.info = self.logger.info
    self.warning = self.logger.warning
    self.error = self.logger.error
    self.exception = self.logger.exception
    self.critical = self.logger.critical

    # Progress-bar slot, populated elsewhere.
    self.pbar = None
def get_logger(module_name):
    """Return a DEBUG-level logger for *module_name* with rich output.

    :param module_name: logger name, typically the caller's ``__name__``.
    :return: the configured :class:`logging.Logger`.
    """
    logger = logging.getLogger(module_name)
    # Guard against stacking a new handler on every call for the same
    # name, which previously caused each record to be printed once per
    # call to get_logger().
    if not logger.handlers:
        handler = RichHandler(rich_tracebacks=True,
                              console=console,
                              tracebacks_show_locals=True)
        handler.setFormatter(
            logging.Formatter(
                "[ %(threadName)s:%(funcName)s:%(lineno)d ] - %(message)s"))
        logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    return logger
def setup_logging(level=logging.INFO):
    """Attach a rich console handler to the root logger and return it."""
    root = logging.getLogger()
    root.setLevel(level)

    handler = RichHandler(show_time=False, rich_tracebacks=True)
    handler.setFormatter(logging.Formatter("%(message)s"))
    root.addHandler(handler)
    return root
def reset_logging_handler(self):
    """
    Resets "scvi" log handler to a basic RichHandler().

    This is useful if piping outputs to a file.
    """
    # Guard: indexing handlers[0] unconditionally raised IndexError when
    # the logger had no handler attached yet.
    if scvi_logger.handlers:
        scvi_logger.removeHandler(scvi_logger.handlers[0])
    ch = RichHandler(show_path=False, show_time=False)
    formatter = logging.Formatter("%(message)s")
    ch.setFormatter(formatter)
    scvi_logger.addHandler(ch)
def rich_formatter(date_format: str = None, stream: IO[str] = None, rich_tracebacks: bool = False, **_: Any) -> logging.Handler:
    """Build a RichHandler writing plain messages to *stream*.

    Falls back to stderr when no stream is given and to the default rich
    date format when none is supplied; extra keyword arguments are
    accepted and ignored.
    """
    target_console = Console(file=stream or sys.stderr)
    time_format = date_format or DateFormat.rich.value

    handler = RichHandler(
        console=target_console,
        log_time_format=time_format,
        rich_tracebacks=rich_tracebacks,
    )
    handler.setFormatter(logging.Formatter("%(message)s"))
    return handler
def setup_custom_logger(name):
    """Create an INFO-level logger with rich markup output and a
    duplicate-message filter."""
    handler = RichHandler(show_time=False,
                          markup=True,
                          rich_tracebacks=True,
                          show_path=False)
    handler.setFormatter(logging.Formatter(fmt="{message:s}", style="{"))

    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)
    logger.addFilter(DuplicateFilter())
    return logger
def main():
    """Entry point: wire console + file logging, then launch the CLI."""
    LOG_FILENAME = "chortest.log"

    # File handler keeps timestamps and levels for post-mortem reading.
    file_handler = FileHandler(LOG_FILENAME)
    file_handler.setFormatter(
        Formatter("[%(asctime)s] - %(levelname)s - %(message)s"))

    # Console output is rendered by rich, so only the message is needed.
    console_handler = RichHandler()
    console_handler.setFormatter(Formatter("%(message)s"))

    basicConfig(level="DEBUG",
                datefmt="[%X]",
                handlers=[console_handler, file_handler])

    app(prog_name="chortest")
def get_logger(name: str, level: int = logging.INFO) -> logging.Logger:
    """Get new custom logger for name."""
    # handler = logging.StreamHandler()
    handler = RichHandler(markup=False,
                          rich_tracebacks=True,
                          show_time=False,
                          console=console)
    handler.setFormatter(logging.Formatter(fmt="%(message)s", datefmt="[%X]"))

    named_logger = logging.getLogger(name)
    named_logger.setLevel(level)
    named_logger.addHandler(handler)
    return named_logger
def setup(self):
    """Configure this object's logger once, from the main process only:
    a WARNING-level rich console handler plus a DEBUG-level file handler
    writing to a timestamped file under a logs directory."""
    # Idempotence / multiprocessing guard: only run once, and only in the
    # main process (child processes inherit or skip setup).
    if self.has_run_before or current_process().name != "MainProcess":
        # print("Has been setup before", current_process())
        return None
    # print("Setting up logs", current_process())

    # Pick the logs directory relative to where we appear to be running
    # from. NOTE(review): the heuristic keys off marker files in the cwd;
    # confirm these markers hold for all launch modes.
    # if running from CLI:
    if Path("pyproject.toml").is_file():
        log_dir = "./logs"
    # if running from iPython / VS Code
    elif Path("cli.py").is_file():
        log_dir = "../logs"
    else:
        log_dir = "./logs"
        # raise AssertionError(f"Could not figure out logging dir")
    Path(log_dir).mkdir(parents=True, exist_ok=True)
    filename = f"{log_dir}/log--{self.start_time}.txt"

    # Create handlers
    # stream_handler = logging.StreamHandler()
    stream_handler = RichHandler(rich_tracebacks=True, console=console)
    file_handler = logging.FileHandler(filename)

    # Configure level and formatter and add it to handlers
    stream_handler.setLevel(logging.WARNING)
    file_handler.setLevel(
        logging.DEBUG)  # DEBUG and above is logged to the file
    stream_handler.setFormatter(logging.Formatter("%(message)s"))
    file_handler.setFormatter(
        logging.Formatter(
            "%(asctime)s | %(name)s | %(levelname)s | %(message)s"))

    # Add handlers to the logger
    logger = logging.getLogger(self.name)
    logger.addHandler(stream_handler)
    logger.addHandler(file_handler)
    logger.setLevel(logging.DEBUG)

    self.has_run_before = True
def setup_logging(args, config):
    """Configure root logging from CLI args and the main.log config section.

    Adds a file handler (with manual log rotation) when a path is
    configured, and a console handler — rich when color is enabled,
    plain otherwise. Outside debug mode, noisy third-party loggers and
    common warnings are silenced.
    """
    cfg = config['main']['log']
    filename = cfg['path']
    formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] %(message)s")
    root = logging.getLogger()
    root.setLevel(logging.DEBUG if args.debug else logging.INFO)
    if filename:
        # since python default log rotation might break session data in different files,
        # we need to do log rotation ourselves
        log_rotation(filename, cfg)
        file_handler = logging.FileHandler(filename)
        file_handler.setFormatter(formatter)
        root.addHandler(file_handler)
    if cfg['color']:
        console_handler = RichHandler(rich_tracebacks=True)
    else:
        console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    root.addHandler(console_handler)
    if not args.debug:
        # disable scapy and tensorflow logging
        logging.getLogger("scapy").disabled = True
        logging.getLogger('tensorflow').disabled = True
        # https://stackoverflow.com/questions/15777951/how-to-suppress-pandas-future-warning
        warnings.simplefilter(action='ignore', category=FutureWarning)
        warnings.simplefilter(action='ignore', category=DeprecationWarning)
        # https://stackoverflow.com/questions/24344045/how-can-i-completely-remove-any-logging-from-requests-module-in-python?noredirect=1&lq=1
        logging.getLogger("urllib3").propagate = False
        requests_log = logging.getLogger("requests")
        requests_log.addHandler(logging.NullHandler())
        # fix: attribute was misspelled "prpagate", which silently set a
        # useless attribute instead of stopping propagation
        requests_log.propagate = False
    elif args.no_websocket:
        logging.getLogger("websockets").disabled = True
def test_markup_and_highlight():
    """Verify RichHandler's markup/highlight defaults and their per-record
    overrides via the `extra` mapping."""
    console = Console(
        file=io.StringIO(),
        force_terminal=True,
        width=140,
        color_system="truecolor",
        _environ={},
    )
    handler = RichHandler(console=console)

    # Check defaults are as expected: highlighting on, markup off.
    assert handler.highlighter
    assert not handler.markup

    formatter = logging.Formatter("FORMATTER %(message)s %(asctime)s")
    handler.setFormatter(formatter)
    log.addHandler(handler)

    log_message = "foo 3.141 127.0.0.1 [red]alert[/red]"

    # Default behavior: the highlighter injects ANSI codes, so the raw
    # message string no longer appears verbatim, and the literal "[red]"
    # markup survives as text.
    log.error(log_message)
    render_fancy = handler.console.file.getvalue()
    assert "FORMATTER" in render_fancy
    assert log_message not in render_fancy
    assert "red" in render_fancy

    # With markup enabled per-record, "[red]...[/red]" is consumed as
    # rich markup and disappears from the rendered text.
    handler.console.file = io.StringIO()
    log.error(log_message, extra={"markup": True})
    render_markup = handler.console.file.getvalue()
    assert "FORMATTER" in render_markup
    assert log_message not in render_markup
    assert "red" not in render_markup

    # With the highlighter disabled per-record, the message passes
    # through verbatim.
    handler.console.file = io.StringIO()
    log.error(log_message, extra={"highlighter": None})
    render_plain = handler.console.file.getvalue()
    assert "FORMATTER" in render_plain
    assert log_message in render_plain
def setup():
    """Setup logging based on the configuration in ``vaex.settings``

    This function is automatically called when importing vaex. If settings are
    changed, call :func:`reset` and this function again to re-apply the settings.
    """
    global log_handler
    if vaex.settings.main.logging.setup:
        logger.setLevel(logging.DEBUG)
        # create console handler and accept all loglevels
        if vaex.settings.main.logging.rich:
            from rich.logging import RichHandler
            log_handler = RichHandler()
        else:
            log_handler = logging.StreamHandler()

        # create formatter
        formatter = logging.Formatter('%(levelname)s:%(threadName)s:%(name)s:%(message)s')

        # add formatter to console handler
        log_handler.setFormatter(formatter)
        log_handler.setLevel(logging.DEBUG)

        # add console handler to logger
        logger.addHandler(log_handler)

    logging.getLogger("vaex").setLevel(logging.ERROR)  # default to highest level
    # Apply configured per-module levels, most verbose last so that a
    # module listed under "debug" wins over "error".
    _set_log_level(vaex.settings.main.logging.error, logging.ERROR)
    _set_log_level(vaex.settings.main.logging.warning, logging.WARNING)
    _set_log_level(vaex.settings.main.logging.info, logging.INFO)
    _set_log_level(vaex.settings.main.logging.debug, logging.DEBUG)
    # VAEX_DEBUG behaves similar to VAEX_LOGGING_DEBUG, but has more effect
    DEBUG_MODE = os.environ.get('VAEX_DEBUG', '')
    if DEBUG_MODE:
        _set_log_level(DEBUG_MODE, logging.DEBUG)
def get_rich_logger(logfile: str = None, level=logging.INFO):
    """A colorful logger based on the `rich` python library."""
    myLogger = logging.getLogger()

    # File handler: plain timestamped format, only when a path is given.
    if logfile is not None:
        # `touch` is a project helper; presumably it creates the file (and
        # parents?) so FileHandler can open it — confirm its contract.
        touch(logfile)
        fileHandler = logging.FileHandler(logfile)
        fileHandler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s"))
        myLogger.addHandler(fileHandler)

    # Rich handler: sized to the current terminal width so rich wraps
    # output correctly.
    width, _ = shutil.get_terminal_size()
    console = Console(color_system='256', width=width)
    richHandler = RichHandler(console=console)
    richHandler.setFormatter(logging.Formatter("%(message)s"))
    myLogger.addHandler(richHandler)

    # Set level on the root logger itself; handlers inherit it.
    myLogger.setLevel(level)
    return myLogger
def get_rich_logger(
    name,
    level='DEBUG',
    fmt='%(message)s',
    datefmt='[%X] ',
    console=None,
):
    """
    Create and return a logger of a given name and logging level.

    Arguments:
        name {str} -- The name of the logger to return.

    Keyword Arguments:
        level {logging.level} -- A log level name or number
            (default: {'DEBUG'}); unknown values fall back to DEBUG
        fmt {str} -- A logging format (default: {'%(message)s'})
        datefmt {str} -- A logging date format (default: {'[%X] '})
        console {rich.console} -- An optional rich console to use for the
            logging output (default: {None})

    Returns:
        logging.logger
    """
    # logging.getLevelName does NOT raise for unknown names — it returns
    # the string "Level <name>" — so the old try/except never fired and
    # an invalid level exploded later inside setLevel. Validate the
    # result explicitly and fall back to DEBUG instead.
    log_level = logging.getLevelName(level)
    if isinstance(log_level, str) and log_level.startswith('Level '):
        log_level = logging.DEBUG

    formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
    handler = RichHandler(console=console or _console)
    handler.setFormatter(formatter)

    logger = logging.getLogger(name)
    # Replace (not append) handlers so repeated calls don't duplicate
    # output — this matches the original behavior.
    logger.handlers = [handler]
    logger.setLevel(log_level)
    return logger
def setup_logger(level="INFO", logfile=None):
    """Setup a logger that uses RichHandler to write the same message both
    in stdout and in a log file called logfile. Level of information can be
    customized and dumping a logfile is optional.

    :param level: level of information; must be "INFO" or "DEBUG"
    :type level: str, optional
    :param logfile: file where information are stored
    :type logfile: str
    :raises ValueError: if `level` is not one of the allowed values
    :return: the configured named logger
    """
    logger = logging.getLogger(
        LOGGER_NAME
    )  # need to give it a name, otherwise *way* too much info gets printed out from e.g. numba

    # Set up level of information — validate before touching handlers.
    possible_levels = ["INFO", "DEBUG"]
    if level not in possible_levels:
        raise ValueError(
            "Passed wrong level for the logger. Allowed levels are: {}".format(
                ', '.join(possible_levels)))
    logger.setLevel(getattr(logging, level))
    formatter = logging.Formatter("%(message)s")

    # Set up stream handler (for stdout)
    stream_handler = RichHandler(show_time=False, rich_tracebacks=True)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)

    # Set up file handler (for logfile). NOTE(review): the file object is
    # opened here and never explicitly closed; it lives for the handler's
    # lifetime — confirm this is acceptable for the application.
    if logfile:
        file_handler = RichHandler(show_time=False,
                                   rich_tracebacks=True,
                                   console=Console(file=open(logfile, "wt")))
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
def get_console_handler(level=logging.INFO):
    """Return a markup-enabled rich handler at *level*, bound to the
    module's shared console and formatter."""
    handler = RichHandler(console=console, markup=True)
    handler.setFormatter(FORMATER)
    handler.setLevel(level)
    return handler
def main():
    """CLI entry point: scrape sold eBay auctions for one or more search
    terms, aggregate price statistics, print a comparison table and
    optionally chart the results."""
    # parse command line arguments
    argparser = argparse.ArgumentParser(
        description=
        "Scrape sold ebay auctions for average prices and comparisons")
    argparser.add_argument(
        "search",
        help=
        "The ebay search terms. Separate with comma to search multiple items and compare them. "
        "Supports advanced patterns such as '-' to exclude words, "
        "parentheses for OR queries, '*' as wildcards and quotes for literals. "
        "For more information, see https://www.thebalancesmb.com/mastering-ebay-search-for-sellers-2531709",
    )
    argparser.add_argument(
        "-l",
        "--locale",
        choices=list(EBAY_DOMAINS.keys()),
        default="en_US",
        help="The locale to run the search in"
        " - will set the eBay's country domain and currency / dates parsing.",
    )
    # NOTE(review): the help text below is truncated ("which strays ") and
    # the -b help contains the typo "applicaple"; left as-is here since
    # they are runtime strings.
    argparser.add_argument(
        "-a",
        "--exclude-anomalies",
        action="store_true",
        help="Excludes auctions which strays ",
    )
    argparser.add_argument(
        "-b",
        "--anomalies-bias",
        type=float,
        default=0.5,
        help="Bias for excluding anomalies"
        "(e.g. a bias of 0.25 will exclude any auctions which sold at 25%% less or more than the average sold price)."
        "Only applicaple with --exclude-anomalies. Default is 0.5",
    )
    argparser.add_argument("-c",
                           "--chart",
                           action="store_true",
                           help="Displays the scraped results in chart")
    argparser.add_argument("-v",
                           "--verbose",
                           action="store_true",
                           help="Increase output log verbosity")
    args = argparser.parse_args()

    # configure logging for the application
    log.setLevel(logging.INFO if not args.verbose else logging.DEBUG)
    rich_handler = RichHandler()
    rich_handler.setFormatter(
        logging.Formatter(fmt="%(message)s", datefmt="[%X]"))
    log.addHandler(rich_handler)
    log.propagate = False

    # start the application
    log.debug(f"Starting application with args {vars(args)}")
    data = []
    for search in args.search.split(","):
        items = scrape_search_term(search.strip(), locale_str=args.locale)
        average = mean(item["price"] for item in items)
        amount = len(items)

        # if the exclude_anomalies flag is turned on, calculate the variation from the
        # anomalies_bias and filter out all auctions whose price falls outside it
        if args.exclude_anomalies:
            variation = average * args.anomalies_bias
            floor = average - variation
            ceiling = average + variation
            items = [
                item for item in items if floor <= item["price"] <= ceiling
            ]
            # recalculate the average once done
            average = mean(item["price"] for item in items)
            print(
                f"excluding {amount - len(items)} under/over-priced anomalies..."
            )

        # calculate start and end range for the search
        dates = [item["date"] for item in items]
        start = min(dates)
        end = max(dates)

        # add search result to the dataset
        data.append({
            "search": search,
            "start": start,
            "end": end,
            "items": items,
            "average": average,
        })

    if not data:
        sys.exit(0)

    # Render one row per search with count, average and min/max prices
    # formatted in the active locale's currency.
    table = Table()
    table.add_column("Search")
    table.add_column("Items")
    table.add_column("Average Price")
    table.add_column("Min")
    table.add_column("Max")
    for search in data:
        prices = [auction["price"] for auction in search["items"]]
        table.add_row(
            search["search"],
            str(len(search["items"])),
            locale.currency(search["average"], symbol=True),
            locale.currency(min(prices), symbol=True),
            locale.currency(max(prices), symbol=True),
        )
    console.print(table)

    if args.chart:
        chart_scraped_data(data)
from rich.console import Console
from rich.logging import RichHandler

# Resolve the package version via importlib.metadata, with a backport
# fallback for Python < 3.8.
# https://github.com/python-poetry/poetry/pull/2366#issuecomment-652418094
# https://github.com/python-poetry/poetry/issues/144#issuecomment-623927302
try:
    import importlib.metadata as importlib_metadata
except ModuleNotFoundError:
    import importlib_metadata

package_name = "scgen"
__version__ = importlib_metadata.version(package_name)

logger = logging.getLogger(__name__)
# set the logging level
logger.setLevel(logging.INFO)

# nice logging outputs: force_terminal keeps colors without a TTY and
# jupyter rendering is disabled so output stays plain text.
console = Console(force_terminal=True)
if console.is_jupyter is True:
    console.is_jupyter = False
ch = RichHandler(show_path=False, console=console, show_time=False)
formatter = logging.Formatter("scGen: %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)

# this prevents double outputs
logger.propagate = False

# Public API of the package.
__all__ = ["setup_anndata", "SCGEN", "SCGENVAE"]
from .config import Config
from .errors import OpusLoadError
from .natives import opus

log = logging.getLogger("discodo")

# libopus is required for audio; fail fast at import time with a
# platform-appropriate hint when it cannot be loaded.
if not opus.isLoaded() and not opus.loadDefaultOpus():
    raise OpusLoadError(
        "Cannot load libopus, please check your python architecture." if sys.
        platform == "win32" else
        "Cannot load libopus, please install `libopus-dev` if you are using linux."
    )

# Handler for sanic access logs (request/status/byte fields come from
# sanic's access-log records).
accessHandler = RichHandler(rich_tracebacks=True)
accessHandler.setFormatter(
    logging.Formatter(
        "%(name)s :\t%(request)s %(message)s %(status)d %(byte)d"))

# Handler for everything else.
genericHandler = RichHandler(rich_tracebacks=True)
genericHandler.setFormatter(logging.Formatter("%(name)s :\t%(message)s"))


def setLoggingLevel(level) -> None:
    # Apply the level to discodo's own logger and libav's.
    # NOTE(review): addHandler here runs on every call, so calling this
    # more than once attaches genericHandler repeatedly — confirm callers
    # only invoke it once.
    for logger in [log, logging.getLogger("libav")]:
        logger.setLevel(level)
        logger.addHandler(genericHandler)


logging.getLogger("sanic").setLevel(logging.INFO)
logging.getLogger("sanic.root").addHandler(genericHandler)