Example #1
def cli(ctx: click.Context, verbose: bool = False,
        no_version_check: bool = False, change_dir: str = None,
        no_log_file: bool = False, log_file: str = "chaostoolkit.log"):
    if verbose:
        logzero.loglevel(logging.DEBUG, update_custom_handlers=False)
        fmt = "%(color)s[%(asctime)s %(levelname)s] "\
              "[%(module)s:%(lineno)d]%(end_color)s %(message)s"
    else:
        logzero.loglevel(logging.INFO, update_custom_handlers=False)
        fmt = "%(color)s[%(asctime)s %(levelname)s]%(end_color)s %(message)s"

    if not no_log_file:
        # let's ensure we log at DEBUG level
        logger.setLevel(logging.DEBUG)
        logzero.logfile(
            click.format_filename(log_file), mode='a',
            loglevel=logging.DEBUG)

    logzero.formatter(
        formatter=logzero.LogFormatter(fmt=fmt, datefmt="%Y-%m-%d %H:%M:%S"),
        update_custom_handlers=False)

    subcommand = ctx.invoked_subcommand

    # make it nicer for going through the log file
    logger.debug("#" * 79)
    logger.debug("Running command '{}'".format(subcommand))

    if not no_version_check:
        check_newer_version(command=subcommand)

    if change_dir:
        logger.warning("Moving to {d}".format(d=change_dir))
        os.chdir(change_dir)
Example #2
    def get_energy_levels(self, temp=23.27, verbose=0):
        if verbose == 0: logger.setLevel(logging.ERROR)
        unique_notations = self.get_unique_entries()
        logger.info("Found unique notations = {0}".format(unique_notations))
        # spec = unique_notations[1]

        for spec in unique_notations:
            direc = os.path.expanduser("~") + '/.nistasd/'

            filename = 'nist_energylevels_' + spec + '.pkl'
            logger.info(
                "Searching for saved energy levels in {0}".format(direc))
            if not os.path.isfile(direc + filename):
                logger.info(
                    "Found no energy levels in {0} for {1}. "
                    "Downloading energy levels ...".format(direc, spec))
                self.energy_levels[spec] = self._parse_energy_levels(
                    spec, temp)

                if not os.path.isdir(direc):
                    os.makedirs(direc)
                # close the file handle deterministically
                with open(direc + filename, 'wb') as f:
                    pickle.dump(self.energy_levels[spec], f, protocol=2)
            else:
                logger.info("Found energy levels in {0}".format(direc))
                with open(direc + filename, 'rb') as f:
                    self.energy_levels[spec] = pickle.load(f)

        return self.energy_levels
Example #3
    def get_lines(self, verbose=0):

        if verbose == 0: logger.setLevel(logging.ERROR)
        # direc = str(pathlib.Path(__file__).resolve().parent) + '/NIST_data/'
        direc = os.path.expanduser("~") + '/.nist-asd/'

        filename = 'nist_lines_' + self.spectrum + '.pkl'
        logger.info("Searching for saved spectrum in {0}".format(direc))
        if not os.path.isfile(direc + filename):
            logger.info(
                "Found no spectrum in {0} for {1}. Downloading spectra ...".
                format(direc, self.spectrum))
            tmp_nistasd = NISTASD(self.spectrum, 0.01, 10000., self.order)
            self.nistasd_obj = tmp_nistasd
            if not os.path.isdir(direc):
                os.makedirs(direc)
            # protocol=2 for python 2 compat; close the handle deterministically
            with open(direc + filename, 'wb') as f:
                pickle.dump(self.nistasd_obj, f, protocol=2)
        else:
            logger.info("Found spectrum in {0}".format(direc))
            with open(direc + filename, 'rb') as f:
                self.nistasd_obj = pickle.load(f)
        self.lines = self.nistasd_obj.lines
        return self.lines
Example #4
def get_logger(name):
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s - %(filename)s:%(lineno)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    return logger
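
# Note (not in the original source): every call to get_logger(name) attaches
# another StreamHandler, so repeated calls emit each record multiple times.
# A minimal guard, assuming the same formatter setup as above:
def get_logger_once(name):
    logger = logging.getLogger(name)
    if not logger.handlers:  # only attach a handler on first use
        ch = logging.StreamHandler()
        ch.setFormatter(logging.Formatter(
            '%(asctime)s - %(filename)s:%(lineno)s'
            ' - %(levelname)s - %(message)s'))
        logger.addHandler(ch)
    logger.setLevel(logging.INFO)
    return logger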
Example #5
def init_logging(app: Flask):
    # logging.basicConfig(format=FORMAT)
    logger = logging.getLogger("timeTravel")
    logger.setLevel(Config.LOG_LEVEL)

    if Config.LOG_PATH:
        logfile(
            Config.LOG_PATH,
            maxBytes=1000000,
            backupCount=3,
            loglevel=Config.LOG_LEVEL,
        )
    _register_before_request(app)
    _register_after_request(app)
Example #6
def configure_logger(verbose: bool = False,
                     log_format: str = "string",
                     log_file: str = None,
                     logger_name: str = "chaostoolkit",
                     context_id: str = None):
    """
    Configure the chaostoolkit logger.

    By default, logs as strings to stdout and the given file. When `log_format`
    is `"json"`, records are sent to the console as JSON strings but remain
    plain strings in the log file. The rationale is that the log file is mostly
    for grepping purposes, while records written to the console can be forwarded
    out of band to anywhere else.
    """
    log_level = logging.INFO
    fmt = "%(color)s[%(asctime)s %(levelname)s]%(end_color)s %(message)s"
    if verbose:
        log_level = logging.DEBUG
        fmt = "%(color)s[%(asctime)s %(levelname)s] "\
              "[%(module)s:%(lineno)d]%(end_color)s %(message)s"

    formatter = LogFormatter(fmt=fmt, datefmt="%Y-%m-%d %H:%M:%S")
    if log_format == 'json':
        fmt = "(process) (asctime) (levelname) (module) (lineno) (message)"
        if context_id:
            fmt = "(context_id) {}".format(fmt)
        formatter = jsonlogger.JsonFormatter(fmt,
                                             json_default=encoder,
                                             timestamp=True)

    # sadly, no other way to specify the name of the default logger publicly
    logzero.LOGZERO_DEFAULT_LOGGER = logger_name
    logger = setup_default_logger(level=log_level, formatter=formatter)
    if context_id:
        logger.addFilter(ChaosToolkitContextFilter(logger_name, context_id))

    if log_file:
        # always everything as strings in the log file
        logger.setLevel(logging.DEBUG)
        fmt = "%(color)s[%(asctime)s %(levelname)s] "\
              "[%(module)s:%(lineno)d]%(end_color)s %(message)s"
        formatter = LogFormatter(fmt=fmt, datefmt="%Y-%m-%d %H:%M:%S")
        logzero.logfile(log_file,
                        formatter=formatter,
                        mode='a',
                        loglevel=logging.DEBUG)
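
# Hedged usage sketch (not from the source): assuming configure_logger is
# importable as defined above, a caller might wire it up like this; the
# file name and context id below are illustrative values only.
if __name__ == "__main__":
    configure_logger(verbose=True, log_format="json",
                     log_file="chaostoolkit.log", context_id="run-42")
    logging.getLogger("chaostoolkit").info("logging configured")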
Example #7
def cli(ctx: click.Context,
        verbose: bool = False,
        no_version_check: bool = False,
        change_dir: str = None,
        no_log_file: bool = False,
        log_file: str = "chaostoolkit.log",
        settings: str = CHAOSTOOLKIT_CONFIG_PATH):
    if verbose:
        logzero.loglevel(logging.DEBUG, update_custom_handlers=False)
        fmt = "%(color)s[%(asctime)s %(levelname)s] "\
              "[%(module)s:%(lineno)d]%(end_color)s %(message)s"
    else:
        logzero.loglevel(logging.INFO, update_custom_handlers=False)
        fmt = "%(color)s[%(asctime)s %(levelname)s]%(end_color)s %(message)s"

    if not no_log_file:
        # let's ensure we log at DEBUG level
        logger.setLevel(logging.DEBUG)
        logzero.logfile(click.format_filename(log_file),
                        mode='a',
                        loglevel=logging.DEBUG)

    colors = logzero.LogFormatter.DEFAULT_COLORS.copy()
    colors[logging.CRITICAL] = logzero.ForegroundColors.RED
    logzero.formatter(formatter=logzero.LogFormatter(
        fmt=fmt, datefmt="%Y-%m-%d %H:%M:%S", colors=colors),
                      update_custom_handlers=False)

    subcommand = ctx.invoked_subcommand

    # make it nicer for going through the log file
    logger.debug("#" * 79)
    logger.debug("Running command '{}'".format(subcommand))

    ctx.obj = {}
    ctx.obj["settings_path"] = click.format_filename(settings)
    logger.debug("Using settings file '{}'".format(ctx.obj["settings_path"]))

    if not no_version_check:
        check_newer_version(command=subcommand)

    if change_dir:
        logger.warning("Moving to {d}".format(d=change_dir))
        os.chdir(change_dir)
Example #8
def _get_logger(logdir,
                logname,
                loglevel=logging.INFO):  # https://wikidocs.net/3736
    fmt = "[%(asctime)s] %(levelname)s: %(message)s"
    formatter = logging.Formatter(fmt)

    # handler = logging.handlers.RotatingFileHandler(
    handler = logging.handlers.TimedRotatingFileHandler(
        filename=os.path.join(logdir, logname),
        # maxBytes=2 * 1024 * 1024 * 1024,
        # backupCount=10,
        when='midnight',
        interval=1,
        encoding='utf-8')
    handler.setFormatter(formatter)

    logger = logging.getLogger("")
    logger.addHandler(handler)
    logger.setLevel(loglevel)
    return logger
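
# Hypothetical call (the paths below are placeholders): the handler above
# rotates the file at midnight and keeps writing with the same format.
log = _get_logger(logdir="/tmp", logname="app.log", loglevel=logging.DEBUG)
log.info("this line lands in /tmp/app.log")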
Example #9
def setup_logger(name, save_dir, distributed_rank, level="INFO"):
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # don't log results for the non-master process
    if distributed_rank > 0:
        return logger
    ch = logging.StreamHandler(stream=sys.stdout)
    ch.setLevel(getattr(logging, level.upper()))
    formatter = logging.Formatter(
        "%(asctime)s %(name)s %(levelname)s: %(message)s")
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    if save_dir:
        fh = logging.FileHandler(os.path.join(save_dir, "log.txt"), mode='w')
        fh.setLevel(getattr(logging, level.upper()))
        fh.setFormatter(formatter)
        logger.addHandler(fh)

    return logger
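
# Illustrative only (names and paths are assumptions): in multi-process
# training, passing each worker's rank means only rank 0 attaches handlers,
# so the console and log.txt stay free of duplicate records. The save_dir
# must already exist for the FileHandler to open log.txt.
logger = setup_logger("trainer", save_dir="./outputs", distributed_rank=0)
logger.info("emitted by the master process only")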
Example #10
    def __init__(self, dyda_config_path='', debug=False):
        """ __init__ of ClassifierTensorRT

        Trainer Variables:
            input_data: a list of image array
            results: defined by lab_tools.output_pred_classification (dict)

        Arguments:
            dyda_config_path -- Trainer config filepath
        """

        if debug:
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.INFO)

        # Setup dyda config
        super(ClassifierTensorRT,
              self).__init__(dyda_config_path=dyda_config_path)
        self.set_param(self.class_name)

        self.check_param_keys()

        # Create engine
        self.engine = self._load_engine(self.param['model_file'])

        # Get Dim:
        self.c, self.h, self.w = self._get_input_shape()

        # Create execution context
        self.context = self.engine.create_execution_context()
        (self.inputs, self.outputs, self.bindings,
         self.stream) = self._allocate_buffers(self.engine)

        # Setup DL model
        with open(self.param['label_file'], 'r') as f:
            self.labels_map = f.read().split('\n')
Example #11
#logger = logging.getLogger()
"""
for handler in logger.handlers:
    handler.setFormatter(logging.Formatter(fmt=__logFormat))
"""

# we set a new handler

handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logzero.LogFormatter(fmt=__logFormat, color=True))
logger.handlers = []
logger.addHandler(handler)

logzero.formatter(logging.Formatter(fmt=__logFormat))

logger.setLevel(conf.loglevel)
"""
Code shortcuts
"""
debug = logger.debug
info = logger.info
warn = logger.warning  # logger.warn is deprecated
error = logger.error
critical = logger.critical

setLevel = logger.setLevel
getLevel = logger.getEffectiveLevel

DEBUG = logging.DEBUG
INFO = logging.INFO
WARN = logging.WARN
Example #12
def entrypoint():
    """
    Entry-point to be used by CLI.
    """
    logger.setLevel(DEFAULT_VERBOSITY)
    main(sys.argv[1:])
Example #13
import sys

import logging
import logzero
from logzero import logger

__logFormat = '[%(asctime)s] %(levelname)-6s| [%(module)s.%(funcName)s] %(message)-80s (%(lineno)d)'

# we set a new handler

handler = logging.StreamHandler(sys.stdout)  # stream to stdout for pycharm
handler.setFormatter(logzero.LogFormatter(fmt=__logFormat))
logger.handlers = []
logger.addHandler(handler)

logger.setLevel(logging.INFO)  # set default level
"""
Code shortcuts
"""
debug = logger.debug
info = logger.info
warn = logger.warning  # logger.warn is deprecated
error = logger.error
critical = logger.critical

setLevel = logger.setLevel
getLevel = logger.getEffectiveLevel

DEBUG = logging.DEBUG
INFO = logging.INFO
WARN = logging.WARN
Example #14
    def _set_log_level(self):
        # look the level up by name with getattr instead of eval()
        level_name = self._config['twitch']['log_level'].get().upper()
        logger.setLevel(getattr(logging, level_name))
Example #15
import sys
import os
import argparse
from pathlib import Path
from datetime import datetime

from logzero import logger
logger.setLevel('DEBUG')


def main():
    parser = argparse.ArgumentParser(
        description='Create HTML from excel file.')
    parser.add_argument(
        'input',
        help='Input filename. Latest file is selected '
             'when <input> is a directory.',
        metavar='<input>')
    parser.add_argument('output',
                        help='Output html filename.',
                        metavar='<output>')
    parser.add_argument(
        '--meta',
        help='Translation file for metadata. default: %(default)s',
        metavar='<name>',
        default='items.csv')
    args = parser.parse_args()

    import pandas as pd
    import plotly.express as px
    import plotly.graph_objects as go
Example #16
import logging
from peewee import Proxy, SqliteDatabase
from logzero import logger

logger = logging.getLogger('peewee')
logger.setLevel(logging.ERROR)


class PWDatabase:

    __proxy = None

    @staticmethod
    def DBProxy():
        if not PWDatabase.__proxy:
            PWDatabase.__proxy = Proxy()
        return PWDatabase.__proxy

    _db = None

    def __init__(self, path):
        try:
            self._db = SqliteDatabase(path, check_same_thread=False)
            PWDatabase.DBProxy().initialize(self._db)
            self.startup()
        except Exception as e:
            logger.error("database file does not exist, or incorrect "
                         "permissions: {}".format(e))

    def close(self):
        self._db.close()
        self._db = None
Example #17
import logging
from peewee import Proxy, SqliteDatabase
from logzero import logger

logger = logging.getLogger('peewee')
logger.setLevel(logging.ERROR)


class PWDatabase(object):

    __proxy = None

    @staticmethod
    def DBProxy():
        if not PWDatabase.__proxy:
            PWDatabase.__proxy = Proxy()
        return PWDatabase.__proxy

    _db = None

    def __init__(self, path):
        try:
            self._db = SqliteDatabase(path, check_same_thread=False)
            PWDatabase.DBProxy().initialize(self._db)
            self.startup()
        except Exception as e:
            logger.error(
                "database file does not exist, or incorrect "
                "permissions: {}".format(e))

    def close(self):
        self._db.close()
Example #18
import logging
from logzero import logger
import multiprocessing as mp
import numpy as np
import os
import PIL.Image as Image
import Queue
from rgb_histo import calc_1d_hist_flatten
import threading
import time

from s3dexp.utils import recursive_glob


if int(os.getenv('VERBOSE', 0)) >= 1:
    logger.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)


def minidiamond(base_dir, pattern="*.*", limit=None, fetcher_only=False, async_fetcher=True, use_mp=False):
    """Speed test for Scopelist + Fetcher + RGB
    
    Arguments:
        base_dir {string} -- Base directory to find files
    
    Keyword Arguments:
        pattern {str} -- File name pattern (default: {"*.jpg"})
        limit {integer} -- Stop after (default: {None})
        fetcher_only -- only run fetcher (default: {False})
        async_fetcher {bool} -- run fetcher in a separate thread/process (default: {True})
Example #19
import logzero
from logzero import logger
import os
from os import path
import sys

abspath = os.path.dirname(os.path.abspath(__file__))
sys.path.append(abspath)

import setting
log_save_path = setting.log_save_path

# Set the logger output name
# logger = logging.getLogger("stock_patterns")
# Set the log level (10 == logging.DEBUG)
logger.setLevel(10)

# Set the log file output destination
if not os.path.exists(log_save_path):
    os.mkdir(log_save_path)

log_file = log_save_path + "/stock_strategy.log"
if not os.path.exists(log_file):
    f = open(log_file, "a")
    f.close()

logzero.logfile(log_file)

# fh = logging.FileHandler(log_file)
# logger.addHandler(fh)
Example #20
    def __exit__(self, exc_type, exc_value, traceback):
        logger.setLevel(self.current_level)
Example #21
    def __enter__(self):
        logger.setLevel(self.tmp_level)
Example #22
import functools
import logging
import os
from typing import List

import graphviz as gv
from logzero import logger as log
from mock import Mock

import util
from core import MessageUnderstanding, States, Context
from core.dialogstates import DialogStates
from logic.responsecomposer import ResponseComposer
from logic.rules.dialogcontroller import application_router
from model import User

log.setLevel(logging.INFO)

Digraph = functools.partial(gv.Digraph, format='svg')


def add_nodes(graph, nodes) -> gv.Digraph:
    for n in nodes:
        if isinstance(n, tuple):
            graph.node(n[0], **n[1])
        else:
            graph.node(n)
    return graph


def add_edges(graph, edges) -> gv.Digraph:
    for e in edges:
Example #23
def main(drive_ip, port=5567, verbose=False):
    if verbose:
        logger.setLevel(logging.DEBUG)

    context = zmq.Context()
    router = context.socket(zmq.ROUTER)
    router.bind("tcp://*:{}".format(port))
    logger.info("Listening on port {}".format(port))

    poller = zmq.Poller()
    poller.register(router, zmq.POLLIN)

    kvclient = Client(drive_ip)
    kvclient.connect()
    assert kvclient.is_connected, "Failed to connect to drive"
    logger.info("kv_client connected {}".format(drive_ip))
    kvclient.queue_depth = 16

    # use these to avoid redefining the callback function every time
    # assumption: there are no requests for the same key in near future (no collision)
    pending_requests = dict()  # key -> [address, source proxy message, value]
    ready_requests = dict()  # same

    def data_callback(msg, cmd, value):
        # fills the value in pending requests and move it to ready_requests
        key = bytes(cmd.body.keyValue.key)
        if cmd.status.code != kinetic_pb2.Command.Status.SUCCESS:
            logger.error(
                "\t Key: " + str(cmd.body.keyValue.key) +
                ", BC: received ackSeq: " + str(cmd.header.ackSequence) +
                ", msgType: " + str(MsgTypes.Name(cmd.header.messageType)) +
                ", statusCode: " + str(StatusCodes.Name(cmd.status.code)))
            value = b''
        else:
            logger.debug("[get] Success: GET " + str(cmd.body.keyValue.key))

        t = pending_requests.pop(key)
        t[2] = bytes(value)
        ready_requests[key] = t

    kvclient.callback_delegate = data_callback

    while True:
        #  Wait for next request from client
        events = poller.poll(0)
        if events:
            address, _, body = router.recv_multipart()
            proxy_msg = Message()
            proxy_msg.ParseFromString(body)

            logger.debug("Recv request from {}, opcode {}".format(
                address, str(proxy_msg.opcode)))

            if proxy_msg.opcode == Message.Opcode.PING:  # trivial
                resp_msg = Message()
                resp_msg.value = b'PONG'
                router.send_multipart(
                    [address, b'', resp_msg.SerializeToString()])

            elif proxy_msg.opcode in (Message.Opcode.GET,
                                      Message.Opcode.GETSMART):
                key = proxy_msg.key
                pending_requests[key] = [address, proxy_msg, None]
                kvclient.get(key)

            else:
                raise NotImplementedError

        # send response
        if ready_requests:
            key, (address, req_msg, value) = ready_requests.popitem()

            if req_msg.opcode == Message.Opcode.GET:
                resp_msg = Message()
                resp_msg.key = key
                resp_msg.opcode = req_msg.opcode
                resp_msg.value = value
                router.send_multipart(
                    [address, b'', resp_msg.SerializeToString()])
            elif req_msg.opcode == Message.Opcode.GETSMART:
                # hack for speed: assume the client won't parse proto message
                router.send_multipart([address, b'', b'\0' * req_msg.size])
            else:
                raise ValueError("Other opcode should not land here: " +
                                 str(req_msg.opcode))
Example #24
import contextlib


@contextlib.contextmanager
def with_log_level(logger, level):
    """Temporarily set the logger's level, restoring it afterwards."""
    old_level = logger.level
    logger.setLevel(level)
    try:
        yield
    finally:
        logger.setLevel(old_level)
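
# Sketch of intended use, assuming the contextlib decorator above: raise the
# threshold around a noisy call and restore it even if the body raises.
import logging

noisy = logging.getLogger("noisy.module")
with with_log_level(noisy, logging.WARNING):
    noisy.info("suppressed while the block is active")
noisy.info("visible again at the previous level")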
Example #25
from .commands.ide import ide
from .commands.projects import project
from .commands.runs import run
from .commands.tracing import tracing

config = ConfigParser()

if os.path.exists(str(Path.home() / ".dkube.ini")):
    config.read(str(Path.home() / ".dkube.ini"))
elif os.path.exists(str(Path.home() / ".da.ini")):
    config.read(str(Path.home() / ".da.ini"))
else:
    print("you need to run dkube configure first")
    sys.exit(-1)

logger.setLevel(logging.INFO)


@click.group()
@click_log.simple_verbosity_option(logger)
@click.pass_context
def main(ctx):
    """Console script for dkube_cli."""
    ctx.obj = None
    if "default" in config.sections():
        name = config.get("default", "name")
        if name in config.sections():
            url = config.get(name, "url")
            token = config.get(name, "token")
            api = DkubeApi(URL=url, token=token)
            username = config.get(name, "username")
Example #26
def main():

    global SSHCMD

    parser = OptionParser()
    parser.add_option('--iterations', type=int, default=10)
    parser.add_option('--controlpersist', action='store_true')
    parser.add_option('--selectors', action='store_true')
    parser.add_option('--use_plugin', action='store_true')
    parser.add_option('--vcount', type=int, default=None)
    parser.add_option('--debug', action='store_true')
    parser.add_option('--hostname', default=None)
    parser.add_option('--username', default=None)
    parser.add_option('--keyfile', default=None)
    parser.add_option('--command', default=None)
    (options, args) = parser.parse_args()

    if not options.debug:
        logger.setLevel('INFO')

    # munge the example ssh command if not using the connection plugin
    if not options.use_plugin:
        validate_control_socket(SSHCMD)
        if not options.controlpersist:
            SSHCMD = remove_control_persist(SSHCMD)

        if options.hostname:
            SSHCMD = set_hostname(SSHCMD, options.hostname)

        if options.username:
            SSHCMD = set_username(SSHCMD, options.username)

        if options.keyfile:
            SSHCMD = set_keyfile(SSHCMD, options.keyfile)

        if options.vcount is not None:
            SSHCMD = set_vcount(SSHCMD, count=options.vcount)

        if options.command is not None:
            SSHCMD[-1] = '/bin/sh -c "%s"' % options.command

        logger.info(SSHCMD)

    # run the command X times and record the durations + speeds
    durations = []
    for x in range(0, options.iterations):
        logger.info('iteration %s' % x)
        start = datetime.now()
        if options.use_plugin:
            (rc, so, se) = run_ssh_exec(
                command=options.command,
                hostname=options.hostname,
                username=options.username,
                keyfile=options.keyfile,
            )
        else:
            (rc, so, se) = run_ssh_cmd(
                SSHCMD,
                hostname=options.hostname,
                username=options.username,
                use_selectors=options.selectors
            )
        stop = datetime.now()
        durations.append(stop - start)
        stats = extract_speeed_from_stdtout(se)
        logger.info('transfer stats ...')
        for k, v in stats.items():
            for k2, v2 in v.items():
                logger.info('%s.%s = %s' % (k, k2, v2))
        logger.info('rc: %s' % rc)
        logger.info('so:%s' % so.strip())
        if rc != 0:
            logger.error(se)
            logger.error('sshcmd: %s' % ' '.join(SSHCMD))

    durations = [x.total_seconds() for x in durations]
    logger.info('durations ...')
    for idx, x in enumerate(durations):
        logger.info('%s. %s' % (idx, x))
    logger.info('duration min: %s' % min(durations))
    logger.info('duration max: %s' % max(durations))
    avg = sum(durations) / float(len(durations))
    logger.info('duration avg: %s' % avg)