Code example #1
class Formatters:
    basic = logging.Formatter(fmt=LogRecordFormat,
                              datefmt=LogDateFormat,
                              style='{')

    colored = ColoredFormatter(fmt=LogRecordFormat,
                               datefmt=LogDateFormat,
                               style='{',
                               level_styles=LogStyle.records,
                               field_styles=LogStyle.fields)

    simpleColored = ColoredFormatter(fmt=SimpleLogRecordFormat,
                                     style='{',
                                     level_styles=LogStyle.records,
                                     field_styles=LogStyle.fields)

    qtColored = ColoredFormatter(fmt=LogRecordFormat,
                                 datefmt=LogDateFormat,
                                 style='{',
                                 level_styles=LogStyle.qtRecords,
                                 field_styles=LogStyle.fields)

    simpleQtColored = ColoredFormatter(fmt=DateLogRecordFormat,
                                       datefmt=LogDateFormat,
                                       style='{',
                                       level_styles=LogStyle.qtRecords,
                                       field_styles=LogStyle.fields)
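
The constants referenced above (LogRecordFormat, LogDateFormat, SimpleLogRecordFormat, DateLogRecordFormat, LogStyle) live elsewhere in the source module. A minimal sketch of how such a formatter table gets wired to a handler, with hypothetical placeholder values standing in for the missing constants:

import logging
from coloredlogs import ColoredFormatter

# Hypothetical placeholders; the real values are defined in the source module.
LogRecordFormat = '{asctime} [{levelname:>8}] {name} $ {message}'
LogDateFormat = '%H:%M:%S'

console = logging.StreamHandler()
console.setFormatter(ColoredFormatter(fmt=LogRecordFormat,
                                      datefmt=LogDateFormat,
                                      style='{'))
logging.getLogger().addHandler(console)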
Code example #2
File: logger.py Project: aliugenb/AutoUITest
def logCreater(filePath):
    '''
    Set up the log generator.
    '''
    # Create the logger
    logger = logging.getLogger('ximalaya')
    logger.setLevel(logging.DEBUG)
    # Configure the file log handler
    fileHandler = logging.FileHandler(filePath, mode='a')
    fileHandler.setLevel(logging.INFO)
    # Configure the console log handler
    consoleHandler = logging.StreamHandler()
    consoleHandler.setLevel(logging.DEBUG)
    # Output format
    formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - <%(funcName)s> %(levelname)s: %(message)s")
    # Check whether a phone is attached
    f = os.popen('adb devices').read().strip()
    fl = f.split('\n')
    if len(fl) == 1:
        formatter1 = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - <%(funcName)s> %(levelname)s: %(message)s")
    else:
        from coloredlogs import ColoredFormatter
        formatter1 = ColoredFormatter('%(asctime)s - %(filename)s[line:%(lineno)d] - <%(funcName)s> %(levelname)s: %(message)s',
                                      level_styles=level_styles)
    fileHandler.setFormatter(formatter)
    consoleHandler.setFormatter(formatter1)
    # Attach the handlers to the logger
    logger.addHandler(fileHandler)
    logger.addHandler(consoleHandler)
    return logger
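
With no device attached, adb devices prints only its header line, so the stripped output splits into a single line and the plain formatter is chosen; otherwise the console gets colors. A hypothetical usage (level_styles in the colored branch must be defined elsewhere in logger.py):

logger = logCreater('app.log')  # INFO and above go to app.log, DEBUG and above to the console
logger.debug('console only')
logger.info('console and file')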
Code example #3
File: log.py Project: zlannnn/trains
    def _make_stream_handler(cls,
                             level=None,
                             stream=sys.stdout,
                             colored=False):
        ch = logging.StreamHandler(stream=stream)
        ch.setLevel(level)
        formatter = None

        # if colored, try to import colorama & coloredlogs (by default, not in the requirements)
        if colored:
            try:
                import colorama
                from coloredlogs import ColoredFormatter
                colorama.init()
                formatter = ColoredFormatter(
                    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            except ImportError:
                colored = False

        # if we don't need or failed getting colored formatter
        if not colored or not formatter:
            formatter = logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        ch.setFormatter(formatter)
        return ch
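
Here colorama and coloredlogs are treated as optional extras, and the handler silently falls back to a plain formatter when they are missing. A hypothetical standalone call, assuming the classmethod sits on a LoggerRoot class as in the trains source:

handler = LoggerRoot._make_stream_handler(level=logging.INFO, colored=True)
logging.getLogger('trains').addHandler(handler)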
Code example #4
File: logging.py Project: jiangjiechen/HedModTmplGen
def init_logger(log_file=None,
                log_file_level=logging.NOTSET,
                from_scratch=False):
    fmt = "[%(asctime)s %(levelname)s] %(message)s"
    log_format = ColoredFormatter(fmt=fmt)
    # log_format = logging.Formatter()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    console_handler = logging.StreamHandler()
    console_handler.setFormatter(log_format)
    logger.handlers = [console_handler]

    if log_file and log_file != '':
        if from_scratch and os.path.exists(log_file):
            logger.warning('Removing previous log file: %s' % log_file)
            os.remove(log_file)
        path = os.path.dirname(log_file)
        cjj.MakeDir(path)
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(log_file_level)
        file_handler.setFormatter(log_format)
        logger.addHandler(file_handler)

    return logger
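
Note that the file handler reuses the same ColoredFormatter as the console, so ANSI escape sequences end up in the log file; examples #5, #12 and #14 pair the file handler with a plain logging.Formatter instead. cjj.MakeDir is a project helper, presumably equivalent to os.makedirs(path, exist_ok=True). A hypothetical call:

logger = init_logger(log_file='logs/run.log', from_scratch=True)
logger.info('colored on the console, ANSI-escaped in logs/run.log')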
Code example #5
File: evaluation.py Project: marcioaug/hunor
def configure_logger():
    style = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(style)
    colored = ColoredFormatter(style)
    ch = logging.StreamHandler()
    fh = logging.FileHandler('hunor-evaluation.log')
    ch.setFormatter(colored)
    fh.setFormatter(formatter)
    logger.addHandler(ch)
    logger.addHandler(fh)
Code example #6
File: log.py Project: youtang1993/trains
    def _make_stream_handler(cls, level=None, stream=sys.stdout, colored=False):
        ch = logging.StreamHandler(stream=stream)
        ch.setLevel(level)
        if colored:
            colorama.init()
            formatter = ColoredFormatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        else:
            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        ch.setFormatter(formatter)
        return ch
Code example #7
File: misc.py Project: randerson112358/okcupyd
def enable_logger(log_name, level=logging.DEBUG):
    log = logging.getLogger(log_name)
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(
        ColoredFormatter(level_styles={'WARNING': dict(color='red')})
    )
    log.addHandler(handler)
    HostNameFilter.install(handler=handler)

    handler.setLevel(level)
    log.setLevel(level)
Code example #8
    def __init__(self):
        from coloredlogs import ColoredFormatter

        log = logging.getLogger("ymp")
        log.setLevel(logging.WARNING)
        log_handler = TqdmHandler()
        log_handler.setLevel(logging.DEBUG)  # no filtering here
        formatter = ColoredFormatter("YMP: %(message)s")
        log_handler.setFormatter(formatter)
        log.addHandler(log_handler)

        self.log = log
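
TqdmHandler is project-specific (ymp) and not shown. A minimal sketch of the usual pattern behind such a handler, routing records through tqdm.write so log lines do not clobber active progress bars (a reconstruction, not the ymp code):

import logging
from tqdm import tqdm

class TqdmHandler(logging.Handler):
    def emit(self, record):
        try:
            # tqdm.write prints above any active progress bar
            tqdm.write(self.format(record))
        except Exception:
            self.handleError(record)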
Code example #9
    def _init_logger(self, level="INFO", file_logging=True, **kwargs):
        """Initialize logger."""

        self.logger = get_logger("CactusBot")
        self.logger.propagate = False

        self.logger.setLevel("DEBUG")

        # Guard .lower() so boolean values don't raise AttributeError
        if level is True or (isinstance(level, str) and level.lower() == "true"):
            level = "DEBUG"
        elif level is False or (isinstance(level, str) and level.lower() == "false"):
            level = "WARNING"
        elif hasattr(level, "upper"):
            level = level.upper()

        format = kwargs.get(
            "format",
            "%(asctime)s %(name)s %(levelname)-8s %(message)s"
        )

        formatter = Formatter(format, datefmt='%Y-%m-%d %H:%M:%S')

        try:
            from coloredlogs import ColoredFormatter
            colored_formatter = ColoredFormatter(format)
        except ImportError:
            colored_formatter = formatter
            self.logger.warning(
                "Module 'coloredlogs' unavailable; using ugly logging.")

        stream_handler = StreamHandler()
        stream_handler.setLevel(level)
        stream_handler.setFormatter(colored_formatter)
        self.logger.addHandler(stream_handler)

        if file_logging:
            file_handler = FileHandler("latest.log")
            file_handler.setLevel("DEBUG")
            file_handler.setFormatter(formatter)
            self.logger.addHandler(file_handler)

        get_logger("requests").setLevel(get_level_name("WARNING"))

        self.logger.info("Logger initialized with level '{}'.".format(level))
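
The bare names used above suggest the snippet's import block, which is not shown; a plausible reconstruction (an assumption):

from logging import (Formatter, StreamHandler, FileHandler,
                     getLogger as get_logger, getLevelName as get_level_name)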
Code example #10
def set_logger() -> logging.Logger:
    """
    Setup logging
    Returns a log.Logger object
    """
    logger = logging.getLogger()

    logger.setLevel(logging.DEBUG)

    # create console handler and set level to debug
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)

    ch.setFormatter(
        ColoredFormatter(fmt="%(asctime)s.%(msecs)3d "
                         "%(module)15s %(levelname)8s "
                         " %(lineno)3d  %(message)s"))
    logger.addHandler(ch)
    return logger
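
One caveat: without an explicit datefmt, the default %(asctime)s already ends in ,mmm milliseconds, so the extra .%(msecs)3d suffix prints them twice. A variant that avoids the duplication:

ColoredFormatter(fmt="%(asctime)s.%(msecs)03d "
                     "%(module)15s %(levelname)8s "
                     " %(lineno)3d  %(message)s",
                 datefmt="%Y-%m-%d %H:%M:%S")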
Code example #11
File: log.py Project: rbdixon/edope
def configure_logging(level=logging.INFO):
    # Turn on logging
    root = logging.getLogger()
    root.setLevel(level)

    # plain log to file
    debug_log = logging.FileHandler('debug.log')
    debug_log.setLevel(level)
    formatter = logging.Formatter(fmt=FORMAT)
    debug_log.setFormatter(formatter)
    root.addHandler(debug_log)

    # color logs to console
    stderr_log = logging.StreamHandler(sys.stderr)
    stderr_log.setLevel(level)
    color_formatter = ColoredFormatter(fmt=FORMAT,
                                       field_styles=LOG_FIELD_STYLES)
    stderr_log.setFormatter(color_formatter)
    root.addHandler(stderr_log)
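
FORMAT and LOG_FIELD_STYLES are module-level constants shared with example #12 below; plausible definitions (assumptions, since they are not part of the snippet):

FORMAT = '%(asctime)s %(name)s %(levelname)s %(message)s'
LOG_FIELD_STYLES = {'asctime': {'color': 'green'},
                    'levelname': {'color': 'cyan', 'bold': True}}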
Code example #12
def _setup_logging(logfile, debug):
    if debug:
        level = logging.DEBUG
    else:
        level = logging.INFO

    # Turn on logging
    root = logging.getLogger()
    root.setLevel(level)

    # shush little ones
    for mod in ['scapy.loading', 'scapy.runtime']:
        logging.getLogger(mod).setLevel(logging.CRITICAL)

    # Colors and formats
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(level)
    fh = logging.FileHandler(logfile, 'w')
    fh.setLevel(level)
    formatter = ColoredFormatter(fmt=FORMAT, field_styles=LOG_FIELD_STYLES)
    ch.setFormatter(formatter)
    fh.setFormatter(logging.Formatter(FORMAT))
    root.addHandler(ch)
    root.addHandler(fh)
Code example #13
class Benchmark:
    class _Decorators:
        @staticmethod
        def ensure_running(func: Callable[['Benchmark', Any], Any]):
            @functools.wraps(func)
            def decorator(self: 'Benchmark', *args, **kwargs):
                if not self.is_running:
                    raise RuntimeError(f'The benchmark ({self._identifier}) has already ended or never been invoked.'
                                       ' Run benchmark first via invoking `run()`!')
                return func(self, *args, **kwargs)

            return decorator

        @staticmethod
        def ensure_not_running(func: Callable[['Benchmark', Any], Any]):
            @functools.wraps(func)
            def decorator(self: 'Benchmark', *args, **kwargs):
                if self.is_running:
                    raise RuntimeError(f'benchmark {self._bench_driver.pid} is already running.')
                return func(self, *args, **kwargs)

            return decorator

        @staticmethod
        def ensure_invoked(func: Callable[['Benchmark', Any], Any]):
            @functools.wraps(func)
            def decorator(self: 'Benchmark', *args, **kwargs):
                if not self._bench_driver.has_invoked:
                    raise RuntimeError(f'benchmark {self._identifier} is never invoked.')
                return func(self, *args, **kwargs)

            return decorator

    _file_formatter = ColoredFormatter(
            '%(asctime)s.%(msecs)03d [%(levelname)s] (%(funcName)s:%(lineno)d in %(filename)s) $ %(message)s')
    _stream_formatter = ColoredFormatter('%(asctime)s.%(msecs)03d [%(levelname)8s] %(name)14s $ %(message)s')

    def __init__(self, identifier: str, bench_config: BenchConfig, perf_config: PerfConfig, tegra_config: TegraConfig,
                 rabbit_mq_config: RabbitMQConfig, workspace: Path, max_benches: int, logger_level: int = logging.INFO):
        self._identifier: str = identifier
        self._perf_config: PerfConfig = perf_config
        self._tegra_config: TegraConfig = tegra_config
        self._rabbit_mq_config: RabbitMQConfig = rabbit_mq_config

        self._bench_driver: BenchDriver = bench_config.generate_driver(identifier)
        self._perf: Optional[asyncio.subprocess.Process] = None
        self._tegra: Optional[asyncio.subprocess.Process] = None
        self._max_benches: int = max_benches
        self._end_time: Optional[float] = None
        self._node_type = MachineChecker.get_node_type()

        perf_parent = workspace / 'perf'
        if not perf_parent.exists():
            perf_parent.mkdir()

        bench_output_parent = workspace / 'bench_output'
        if not bench_output_parent.exists():
            bench_output_parent.mkdir()

        # perf_csv contains both info of `perf` and `tegrastats`
        self._perf_csv: Path = perf_parent / f'{identifier}.csv'

        # bench_output contains the output info of `bench_driver` (actual stdout from workload)
        self._bench_output_log: Path = bench_output_parent / f'{identifier}.log'

        log_parent = workspace / 'logs'
        if not log_parent.exists():
            log_parent.mkdir()

        self._log_path: Path = log_parent / f'{identifier}.log'

        # setup for loggers

        logger = logging.getLogger(self._identifier)
        logger.setLevel(logger_level)

        metric_logger = logging.getLogger(f'{self._identifier}-rabbitmq')
        metric_logger.setLevel(logger_level)

        bench_output_logger = logging.getLogger(f'{self._identifier}-bench_output')
        bench_output_logger.setLevel(logger_level)

    @_Decorators.ensure_not_running
    async def start_and_pause(self, print_log: bool = False):
        self._remove_logger_handlers()

        # setup for loggers

        logger = logging.getLogger(self._identifier)

        fh = logging.FileHandler(self._log_path, mode='w')
        fh.setFormatter(Benchmark._file_formatter)
        logger.addHandler(fh)

        if print_log:
            stream_handler = logging.StreamHandler()
            stream_handler.setFormatter(Benchmark._stream_formatter)
            logger.addHandler(stream_handler)

        # launching benchmark

        logger.info('Starting benchmark...')
        await self._bench_driver.run()
        logger.info(f'The {self._bench_driver.wl_type} benchmark has started. pid : {self._bench_driver.pid}')

        self._pause_bench()

    @_Decorators.ensure_running
    async def monitor(self, print_metric_log: bool = False):
        logger = logging.getLogger(self._identifier)

        try:
            # launching perf
            self._perf = await asyncio.create_subprocess_exec(
                    'perf', 'stat', '-e', self._perf_config.event_str,
                    '-p', str(self._bench_driver.pid), '-x', ',', '-I', str(self._perf_config.interval),
                    stderr=asyncio.subprocess.PIPE)

            # launching tegrastats
            # tegrastats uses stdout for printing
            if self._node_type == NodeType.IntegratedGPU:
                self._tegra = await asyncio.create_subprocess_exec(
                    'sudo', '/home/nvidia/tegrastats', '--interval', str(self._tegra_config.interval),
                    stdout=asyncio.subprocess.PIPE)

            # setup for metric logger
            # rabbit_mq is used to read perf values
            # tegra config values are passed along just in case, but are not used
            if self._node_type == NodeType.IntegratedGPU:
                rabbit_mq_handler = RabbitMQHandler(self._rabbit_mq_config, self._bench_driver.name,
                                                    self._bench_driver.wl_type, self._bench_driver.pid,
                                                    self._perf.pid, self._perf_config.interval,
                                                    self._tegra.pid, self._tegra_config.interval,
                                                    self._max_benches)
                rabbit_mq_handler.setFormatter(RabbitMQFormatter(self._perf_config.event_names,
                                                                 self._tegra_config.event_names))

            elif self._node_type == NodeType.CPU:
                rabbit_mq_handler = RabbitMQHandler(self._rabbit_mq_config, self._bench_driver.name,
                                                    self._bench_driver.wl_type, self._bench_driver.pid,
                                                    self._perf.pid, self._perf_config.interval, None, None,
                                                    self._max_benches)
                rabbit_mq_handler.setFormatter(RabbitMQFormatter(self._perf_config.event_names, None))

            metric_logger = logging.getLogger(f'{self._identifier}-rabbitmq')
            # NOTE: rabbit_mq_handler below forwards metric_logger's records to RabbitMQ
            metric_logger.addHandler(rabbit_mq_handler)
            #metric_logger.addHandler(node_mgr_mq_handler)

            if print_metric_log:
                metric_logger.addHandler(logging.StreamHandler())

            with self._perf_csv.open('w') as fp:
                # print csv header
                fp.write(','.join(self._perf_config.event_names))
                if self._node_type == NodeType.IntegratedGPU:
                    fp.write(',gpu_core_util,gpu_core_freq,gpu_emc_util,gpu_emc_freq\n')
                elif self._node_type == NodeType.CPU:
                    fp.write('\n')

            metric_logger.addHandler(logging.FileHandler(self._perf_csv))

            # perf polling loop

            num_of_events = len(self._perf_config.events)
            # TODO: Perf Ver. can be a problem (ref. to benchmark_copy.py)
            while self._bench_driver.is_running and self._perf.returncode is None:
                record = []
                ignore_flag = False

                for _ in range(num_of_events):
                    raw_line = await self._perf.stderr.readline()

                    line = raw_line.decode().strip()
                    try:
                        value = line.split(',')[1]
                        float(value)
                        record.append(value)
                    except (IndexError, ValueError) as e:
                        ignore_flag = True
                        logger.debug(f'a line that perf printed was ignored due to the following exception : {e}'
                                     f' and the line is : {line}')
                if self._node_type == NodeType.IntegratedGPU:
                    # tegra data append(gr3d, gr3dfreq, emc, emcfreq)
                    raw_tegra_line = await self._tegra.stdout.readline()
                    for rec in self.tegra_parser(raw_tegra_line):
                        record.append(rec)

                if not ignore_flag:
                    metric_logger.info(','.join(record))

            logger.info('end of monitoring loop')

        except CancelledError as e:
            logger.debug(f'The task was cancelled : {e}')
            self._stop()

        finally:
            try:
                self._kill_perf()
                if self._node_type == NodeType.IntegratedGPU:
                    self._kill_tegra()
                self._bench_driver.stop()
            except (psutil.NoSuchProcess, ProcessLookupError):
                pass

            await self._bench_driver.cleanup()
            logger.info('The benchmark has ended.')
            self._remove_logger_handlers()
            self._end_time = time.time()

    @_Decorators.ensure_running
    async def monitor_bench_output(self, print_bench_output_log: bool = False):
        logger = logging.getLogger(self._identifier)

        try:
            bench_output_logger = logging.getLogger(f'{self._identifier}-bench_output')
            if print_bench_output_log:
                bench_output_logger.addHandler(logging.StreamHandler())

            with self._bench_output_log.open('w') as fp:
                # print bench_output_log header
                fp.write(f'[{self._bench_driver.bench_name}:{self._bench_driver.name}] bench_output\n')

            bench_output_logger.addHandler(logging.FileHandler(self._bench_output_log))
            # FIXME: hard-coded for case of ssd driver
            make_output = False
            if 'ssd' == self._bench_driver.bench_name:
                make_output = True
            while self._bench_driver.is_running and self._bench_driver.async_proc.returncode is None and make_output:
                latency_seconds = ''
                ignore_flag = False
                """
                bench_output_logger.info(f'self._bench_driver.is_running: {self._bench_driver.is_running}')
                bench_output_logger.info(f'self._bench_driver.async_proc.returncode: '
                                         f'{self._bench_driver.async_proc.returncode}')
                bench_output_logger.info(f'make_output: '
                                         f'{make_output}')
                """
                raw_line = await self._bench_driver.async_proc.stdout.readline()
                line = raw_line.decode().strip()
                #bench_output_logger.info(f'{line}')
                #ex) im_detect: 26/100 0.172s
                #ex) timer: 0.333 sec.
                # FIXME: hard-coded for ssd driver
                if "im_detect:" in line:
                    # Eval: latency per image
                    splitted = line.split()
                    latency_seconds = splitted[2].rstrip('s')
                    ignore_flag = False
                elif "time:" in line:
                    # Train: latency per iter
                    splitted = line.split()
                    latency_seconds = splitted[5]
                    ignore_flag = False
                else:
                    # IF "im_detect:" not in `line` and "timer:" not in `line`
                    ignore_flag = True
                    if line == '':
                        break

                if not ignore_flag:
                    bench_output_logger.info(latency_seconds)

            logger.info('end of monitoring bench_output loop')

        except CancelledError as e:
            logger.debug(f'The task was cancelled : {e}')
            self._stop()
        finally:
            logger.info('The benchmark output generation has ended.')
            self._remove_logger_handlers()


    @staticmethod
    def tegra_parser(tegrastatline):
        """
        parses tegrastats line information for GR3D & EMC with freq
        :param tegrastatline:
        :return:
        """
        _tegra_lists = tegrastatline.decode().strip().split(' ')

        def freq_parser(_utilfreq):
            val = _utilfreq.split('@')[0]
            return [val[:-1], _utilfreq.split('@')[1]]
        # [%,freq]
        emc_freq = freq_parser(_tegra_lists[7])
        gr3d_freq = freq_parser(_tegra_lists[9])

        return [gr3d_freq[0], gr3d_freq[1], emc_freq[0], emc_freq[1]]

    def _pause_bench(self):
        logging.getLogger(self._identifier).info('pausing...')

        self._bench_driver.pause()

    def pause(self):
        self._pause_bench()
        self._perf.send_signal(SIGSTOP)
        # Tegrastats
        if self._node_type == NodeType.IntegratedGPU:
            self._tegra.send_signal(SIGSTOP)

    @_Decorators.ensure_running
    def resume(self):
        logging.getLogger(self._identifier).info('resuming...')

        self._bench_driver.resume()
        if self._perf is not None and self._perf.returncode is None:
            self._perf.send_signal(SIGCONT)
        # Tegrastats
        if self._node_type == NodeType.IntegratedGPU:
            if self._tegra is not None and self._tegra.returncode is None:
                self._tegra.send_signal(SIGCONT)

    def _kill_perf(self):
        if self._perf is not None and self._perf.returncode is None:
            self._perf.kill()
        self._perf = None

    # Tegrastats
    def _kill_tegra(self):
        if self._tegra is not None and self._tegra.returncode is None:
            # tegrastats was launched via sudo, so kill it via sudo as well
            os.system("sudo kill %d" % self._tegra.pid)

#            self._tegra.kill()
        self._tegra = None

    def _stop(self):
        logger = logging.getLogger(self._identifier)
        logger.info('stopping...')

        try:
            self._kill_perf()
            self._kill_tegra()
            self._bench_driver.stop()
        except (psutil.NoSuchProcess, ProcessLookupError) as e:
            logger.debug(f'Process already killed : {e}')

    def _remove_logger_handlers(self):
        logger = logging.getLogger(self._identifier)
        metric_logger = logging.getLogger(f'{self._identifier}-rabbitmq')
        bench_output_logger = logging.getLogger(f'{self._identifier}-bench_output')

        for handler in tuple(metric_logger.handlers):  # type: Handler
            logger.debug(f'removing metric handler {handler}')
            metric_logger.removeHandler(handler)
            try:
                handler.flush()
                handler.close()
            except Exception:
                logger.exception('An exception occurred while removing a handler from the metric logger.')

        for handler in tuple(bench_output_logger.handlers):  # type: Handler
            logger.debug(f'removing bench_output handler {handler}')
            bench_output_logger.removeHandler(handler)
            try:
                handler.flush()
                handler.close()
            except Exception:
                logger.exception('An exception occurred while removing a handler from the bench_output logger.')

        for handler in tuple(logger.handlers):  # type: Handler
            logger.removeHandler(handler)
            handler.flush()
            handler.close()

    @property
    @_Decorators.ensure_invoked
    def launched_time(self) -> float:
        """
        The launched_time is different based on the benchmark name
        :return:
        """
        if(self._bench_driver.bench_name == 'spark-submit'):
          return self._bench_driver.sparkGPU_launched_time
        else :
          return self._bench_driver.created_time

    @property
    def identifier(self) -> str:
        return self._identifier

    @property
    def end_time(self) -> Optional[float]:
        return self._end_time

    @property
    def runtime(self) -> Optional[float]:
        if self._end_time is None:
            return None
        elif self._end_time < self.launched_time:
            return None
        else:
            return self._end_time - self.launched_time

    @property
    def is_running(self) -> bool:
        return self._bench_driver.is_running and (self._perf is None or self._perf.returncode is None)
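
For reference, tegra_parser above expects tokens 7 and 9 of the space-split tegrastats line to carry the util%@freq pairs for EMC and GR3D respectively; the exact field order varies across L4T releases, so the byte string below is a synthetic line shaped to match (not verbatim tegrastats output):

sample = b'RAM 1851/3983MB lfb 3x4MB CPU 14% EMC_FREQ 4%@1600 GR3D_FREQ 0%@998'
Benchmark.tegra_parser(sample)  # -> ['0', '998', '4', '1600']  (gr3d %, gr3d freq, emc %, emc freq)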
Code example #14
    parser.add_argument('-ranges', nargs='*', action="store", dest='ranges', help='Range of dates to be downloaded. '
                                                                                  'Keep the following format: '
                                                                                  'YYYY-mm-dd, with no quotes or '
                                                                                  'spaces between the numbers. The '
                                                                                  'dates must be in pairs, where '
                                                                                  'the first is the start date, '
                                                                                  'followed by the stop date.')
    parser.add_argument('-verbose', action="store", dest='verbose', help='Print log of processing')
    args = parser.parse_args()

    if bool(args.verbose):
        log = logging.getLogger('')

        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(logging.INFO)
        cf = ColoredFormatter("[%(asctime)s] {%(filename)-15s:%(lineno)-4s} %(levelname)-5s: %(message)s ")
        ch.setFormatter(cf)
        log.addHandler(ch)

        fh = logging.FileHandler('logging.log')
        fh.setLevel(logging.INFO)
        ff = logging.Formatter("[%(asctime)s] {%(filename)-15s:%(lineno)-4s} %(levelname)-5s: %(message)s ",
                               datefmt='%Y.%m.%d %H:%M:%S')
        fh.setFormatter(ff)
        log.addHandler(fh)

        log.setLevel(logging.DEBUG)
    else:
        logging.basicConfig(format="%(levelname)s: %(message)s")

    main(args.ranges)
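
Note that with action="store", args.verbose is a string, so bool(args.verbose) is True for any supplied value, even -verbose False. A boolean flag is normally declared like this instead:

parser.add_argument('-verbose', action='store_true', dest='verbose',
                    help='Print log of processing')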
Code example #15
class AutopsyLogger(logging.getLoggerClass()):

    RED = "red"
    GREEN = "green"
    MAGENTA = "magenta"
    # BLACK   = "black"
    WHITE = "white"
    CYAN = "cyan"
    YELLOW = "yellow"
    BLUE = "blue"
    GREY = "grey"

    dateFmt = "%Y-%m-%d %H:%M:%S"
    # FORMATTER = logging.Formatter(fmt='%(asctime)s.%(msecs)03d: %(levelname)-8s : '
    #                                   '[%(threadName)s] - (%(funcName)-10s) - %(message)s', datefmt=dateFmt)
    FORMATTER = ColoredFormatter(
        fmt='%(asctime)s.%(msecs)03d: %(levelname)-8s : '
        '[%(threadName)-10s] - (%(funcName)-15s) - %(message)s',
        level_styles={
            "debug": {
                "color": "blue"
            },
            "info": {
                "color": "white"
            },
            "warning": {
                "color": "magenta"
            },
            "error": {
                "color": "red"
            },
            "critical": {
                "color": "red",
                "bold": True
            }
        },
        field_styles={
            "asctime": {
                "color": "yellow"
            },
            "msec": {
                "color": "yellow"
            },
            "levelname": {
                "color": "blue",
                "bold": True
            },
            "funcName": {
                "color": "cyan"
            }
        })

    def __init__(self, name="Main Logger", handlers=None, stderr=True):
        """
        :param name: Name of the logger, Default "Main Logger"
        :param handlers: List of handlers to attach
        :param stderr: If true, default StreamHandler to stderr is attached
        :return:
        """
        Logger.__init__(self, name)

        self.setLevel(logging.DEBUG)
        self.propagate = False

        if handlers and len(handlers) > 0:
            for handler in handlers:
                if isinstance(handler, logging.Handler):
                    handler.setFormatter(self.FORMATTER)
                    self.addHandler(handler)

        if stderr:
            self.stderrHandler = self.addStreamHandler()

    def setLogLevelStderrHandler(self, loggingLevel):
        self.stderrHandler.setLevel(loggingLevel)

    def addStreamHandler(self, loggingLevel=logging.DEBUG):
        logStreamHandler = logging.StreamHandler()
        # logStreamHandler = RainbowLoggingHandler(sys.stderr, datefmt=self.dateFmt,
        #                                          color_asctime=("yellow", None, False),
        #                                          color_msecs=("yellow", None, False),
        #                                          color_levelname=("blue", None, True))
        logStreamHandler.setFormatter(self.FORMATTER)
        logStreamHandler.setLevel(loggingLevel)

        self.addHandler(logStreamHandler)

        return logStreamHandler

    def addFileHandler(self, filePath, loggingLevel=logging.DEBUG):
        logFileHandler = logging.FileHandler(filePath, mode='w')
        logFileHandler.setLevel(loggingLevel)
        logFileHandler.setFormatter(self.FORMATTER)

        self.addHandler(logFileHandler)

    def logColor(self,
                 level,
                 msg,
                 fg_color=None,
                 bg_color=None,
                 bold=False,
                 blink=False,
                 dark=False,
                 underline=False,
                 reverse=False,
                 concealed=False,
                 *args,
                 **kwargs):
        attr = []
        if bold:
            attr.append("bold")
        if blink:
            attr.append("blink")
        if dark:
            attr.append("dark")
        if underline:
            attr.append("underline")
        if reverse:
            attr.append("reverse")
        if concealed:
            attr.append("concealed")

        if "%s" not in msg:
            msg = colored(msg,
                          color=fg_color,
                          on_color=("on_" + bg_color) if bg_color else None,
                          attrs=attr)
        super(AutopsyLogger, self).log(level, msg)

    def debug(self,
              msg,
              fg_color=GREEN,
              bg_color=None,
              bold=False,
              blink=False,
              dark=False,
              underline=False,
              reverse=False,
              concealed=False,
              *args,
              **kwargs):
        attr = []
        if bold:
            attr.append("bold")
        if blink:
            attr.append("blink")
        if dark:
            attr.append("dark")
        if underline:
            attr.append("underline")
        if reverse:
            attr.append("reverse")
        if concealed:
            attr.append("concealed")

        if "%s" not in msg:
            msg = colored(msg,
                          color=fg_color,
                          on_color=("on_" + bg_color) if bg_color else None,
                          attrs=attr)
        super(AutopsyLogger, self).debug(msg)

    def info(self,
             msg,
             fg_color=None,
             bg_color=None,
             bold=False,
             blink=False,
             dark=False,
             underline=False,
             reverse=False,
             concealed=False,
             *args,
             **kwargs):
        attr = []
        if bold:
            attr.append("bold")
        if blink:
            attr.append("blink")
        if dark:
            attr.append("dark")
        if underline:
            attr.append("underline")
        if reverse:
            attr.append("reverse")
        if concealed:
            attr.append("concealed")

        if "%s" not in msg:
            msg = colored(msg,
                          color=fg_color,
                          on_color=("on_" + bg_color) if bg_color else None,
                          attrs=attr)
        super(AutopsyLogger, self).info(msg)

    def warning(self,
                msg,
                fg_color=None,
                bg_color=None,
                bold=False,
                blink=False,
                dark=False,
                underline=False,
                reverse=False,
                concealed=False,
                *args,
                **kwargs):
        attr = []
        if bold:
            attr.append("bold")
        if blink:
            attr.append("blink")
        if dark:
            attr.append("dark")
        if underline:
            attr.append("underline")
        if reverse:
            attr.append("reverse")
        if concealed:
            attr.append("concealed")

        if "%s" not in msg:
            msg = colored(msg,
                          color=fg_color,
                          on_color=("on_" + bg_color) if bg_color else None,
                          attrs=attr)
        super(AutopsyLogger, self).warning(msg)

    def error(self,
              msg,
              fg_color=None,
              bg_color=None,
              bold=True,
              blink=False,
              dark=True,
              underline=False,
              reverse=False,
              concealed=False,
              *args,
              **kwargs):
        attr = []
        if bold:
            attr.append("bold")
        if blink:
            attr.append("blink")
        if dark:
            attr.append("dark")
        if underline:
            attr.append("underline")
        if reverse:
            attr.append("reverse")
        if concealed:
            attr.append("concealed")

        if "%s" not in msg:
            msg = colored(msg,
                          color=fg_color,
                          on_color=("on_" + bg_color) if bg_color else None,
                          attrs=attr)
        super(AutopsyLogger, self).error(msg)

    def critical(self,
                 msg,
                 fg_color=WHITE,
                 bg_color=RED,
                 bold=True,
                 blink=False,
                 dark=True,
                 underline=False,
                 reverse=False,
                 concealed=False,
                 *args,
                 **kwargs):
        attr = []
        if bold:
            attr.append("bold")
        if blink:
            attr.append("blink")
        if dark:
            attr.append("dark")
        if underline:
            attr.append("underline")
        if reverse:
            attr.append("reverse")
        if concealed:
            attr.append("concealed")

        if "%s" not in msg:
            msg = colored(msg,
                          color=fg_color,
                          on_color=("on_" + bg_color) if bg_color else None,
                          attrs=attr)
        super(AutopsyLogger, self).critical(msg)

    # TODO: v_line_bold is not supported
    def header_box(self,
                   msg="",
                   level=logging.INFO,
                   width=70,
                   left_margin=2,
                   top_margin=2,
                   h_box_char='*',
                   v_box_char="|",
                   h_line_bold=False,
                   v_line_bold=False,
                   un_ended=True):

        brokenLines = [
            msg[i:i + (width - (2 * left_margin + 2))]
            for i in range(0, len(msg), (width - (2 * left_margin + 2)))
        ]

        # TODO: Handle "new-line" chars in the msg properly
        # finalLines = []
        # for line in brokenLines:
        #     map(finalLines.append, line.splitlines())
        # brokenLines = finalLines

        if h_box_char == '-' or h_box_char == '=':
            self.log(level, '+' + (h_box_char * (width - 2)) + '+')
        else:
            self.log(level, (h_box_char * width))

        if h_line_bold:
            if h_box_char == '-' or h_box_char == '=':
                self.log(level, '+' + (h_box_char * (width - 2)) + '+')
            else:
                self.log(level, (h_box_char * width))

        for i in range(top_margin):
            self.log(
                level, v_box_char + (" " * (width - 2)) +
                (v_box_char if un_ended else ""))

        for line in brokenLines:
            temp_line = v_box_char + (" " * left_margin) + line.replace("\n", " ") + (" " * left_margin) + \
                        (v_box_char if un_ended else "")
            gap = width - len(temp_line)
            line = v_box_char + (" " * left_margin) + line.replace("\n", " ") + " " * gap + \
                   (" " * left_margin) + (v_box_char if un_ended else "")
            self.log(level, line)

        for i in range(top_margin):
            self.log(
                level, v_box_char + (" " * (width - 2)) +
                (v_box_char if un_ended else ""))

        if h_line_bold:
            if h_box_char == '-' or h_box_char == '=':
                self.log(level, '+' + (h_box_char * (width - 2)) + '+')
            else:
                self.log(level, (h_box_char * width))

        if h_box_char == '-' or h_box_char == '=':
            self.log(level, '+' + (h_box_char * (width - 2)) + '+')
        else:
            self.log(level, (h_box_char * width))

    def header1_box(self, msg="", level=logging.INFO):
        self.header_box(msg,
                        level,
                        width=80,
                        left_margin=3,
                        top_margin=2,
                        h_box_char="#",
                        v_box_char="#",
                        h_line_bold=True,
                        un_ended=False)

    def header2_box(self, msg="", level=logging.INFO):
        self.header_box(msg,
                        level,
                        width=80,
                        left_margin=3,
                        top_margin=2,
                        h_box_char="#",
                        v_box_char="#",
                        h_line_bold=True)

    def header3_box(self, msg="", level=logging.INFO):
        self.header_box(msg,
                        level,
                        width=74,
                        left_margin=3,
                        top_margin=2,
                        h_box_char="*",
                        v_box_char="*",
                        h_line_bold=True)

    def header4_box(self, msg="", level=logging.INFO):
        self.header_box(msg,
                        level,
                        width=70,
                        left_margin=2,
                        top_margin=2,
                        h_box_char="=")

    def header5_box(self, msg="", level=logging.INFO):
        self.header_box(msg,
                        level,
                        width=70,
                        left_margin=2,
                        top_margin=2,
                        h_box_char="*",
                        v_box_char="*")

    def header6_box(self, msg="", level=logging.INFO):
        self.header_box(msg,
                        level,
                        width=70,
                        left_margin=1,
                        top_margin=0,
                        h_box_char="*",
                        v_box_char="*")

    def header7_box(self, msg="", level=logging.INFO):
        self.header_box(msg,
                        level,
                        width=70,
                        left_margin=1,
                        top_margin=0,
                        h_box_char="-")

    def horiz_line(self, size=72, level=logging.INFO):
        self.log(level, "-" * size)

    # THINK TWICE or MORE BEFORE CHANGING THIS FUNCTION. Derived from logging.py
    def findCaller(self):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.
        """
        f = currentframe()
        if f is not None:
            f = f.f_back
        rv = "(unknown file)", 0, "(unknown function)"
        while hasattr(f, "f_code"):
            co = f.f_code
            filename = os.path.normcase(co.co_filename)
            if filename == _srcfile or "logging/__init__.py" in filename:
                f = f.f_back
                continue

            if co.co_name.startswith("__") and co.co_name.endswith("__"):
                instance = f.f_locals.get("self", None)
                if instance:
                    rv = (co.co_filename, f.f_lineno,
                          instance.__class__.__name__ + "." + co.co_name)
                else:
                    rv = (co.co_filename, f.f_lineno, co.co_name)
            else:
                rv = (co.co_filename, f.f_lineno, co.co_name)
            break
        return rv
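
The colored() helper used by the log methods above is presumably termcolor.colored (the import block is not shown):

from termcolor import colored

print(colored('boxed header', color='white', on_color='on_red', attrs=['bold']))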
Code example #16
File: base.py Project: isac322/benchmon
class BaseBenchmark(Generic[_CFG_T], ContextReadable, metaclass=ABCMeta):
    """
    벤치마크의 실행과 일시 정지, 모니터링을 담당하는 high-level 클래스들의 부모 클래스.

    각각의 특성에 따라 이 클래스를 상속받고, abstractmethod들을 채워야한다.

    벤치마크의 역할:

    * 벤치마크 실행
        * :meth:`_start` 를 override 해야한다.
        * 상황에 따라서 :mod:`드라이버 <benchmon.benchmark.drivers>` 를 사용하여 벤치마크를 실행 할 수도 있다.
    * 벤치마크 일시 정지와 재시작
        * 벤치마크의 특성에 따라서 지원할 수도 안할 수도 있다.
    * 벤치마크 모니터링
        * 이 클래스는 :class:`모니터 <benchmon.monitors.base.BaseMonitor>` 를 여러개 가지며,
          벤치마크 실행 후 :meth:`monitor` 로 모니터링을 시작할 수 있다.
        * 벤치마크에 `모니터` 를 등록하는 방법은 각 벤치마크의 빌더를 통해서 할 수 있다.
            * 즉 벤치마크 생성시 한번에 모든 `모니터` 를 등록 해야하며, 중간에 `모니터` 를 추가하거나 삭제할 수 없다.
        * `모니터` 를 통해 생성된 데이터는 :class:`파이프라인 <benchmon.monitors.pipelines.base.BasePipeline>` 를 통해 전단된다.
    * 각종 제약 (constraint)
        * 이 클래스는 :mod:`제약 <benchmon.benchmark.constraints>` 을 여러개 가지며,
          벤치마크의 실행 전후로 자원 할당 등과 같은 역할들을 담당하게 할 수 있다.
        * 벤치마크에 `제약` 을 등록하는 방법은 각 벤치마크의 빌더를 통해서 할 수 있다.
            * 즉 벤치마크 생성시 한번에 모든 `제약` 을 등록 해야하며, 중간에 `제약` 을 추가하거나 삭제할 수 없다.

    .. todo::

        * :meth:`start_and_pause` 나 :meth:`monitor` 메소드의 용례와 이름이 부정확하며 서로간의 호출 순서가 존재한다.
    """
    __slots__ = ('_bench_config', '_identifier', '_monitors', '_constraints',
                 '_pipeline', '_log_path', '_context_variable')

    Builder: ClassVar[Type[_BLD_T]]

    _FILE_FORMATTER: ClassVar[ColoredFormatter] = ColoredFormatter(
        '%(asctime)s.%(msecs)03d [%(levelname)s] (%(funcName)s:%(lineno)d in %(filename)s) $ %(message)s'
    )
    _registered_bench: ClassVar[MutableMapping[str,
                                               Type[BaseBenchmark]]] = dict()
    _NICKNAME: ClassVar[str]

    _bench_config: _CFG_T
    _identifier: str
    _monitors: Tuple[_MON_T, ...]
    _constraints: Tuple[_CST_T, ...]
    _pipeline: BasePipeline
    _log_path: Path
    _context_variable: Context

    @classmethod
    def of(cls, context: Context) -> BaseBenchmark:
        # noinspection PyProtectedMember
        for c, v in context._variable_dict.items():
            if issubclass(c, cls):
                return v

        raise BenchNotFoundError(
            'Context variable should have exactly one Benchmark')

    @classmethod
    def register_nickname(cls, bench: Type[BaseBenchmark]) -> None:
        """
        벤치마크 `bench` 를 등록한다.
        이 클래스의 모든 자식 클래스는 이 메소드를 통해서 자신을 파서로 등록해야 benchmon에서 자동으로 그 벤치마크를 찾을 수 있다.

        :param bench: 등록할 벤치마크
        :type bench: typing.Type[benchmon.benchmark.base.BaseBenchmark]
        """
        if not issubclass(bench, BaseBenchmark):
            raise ValueError(f'{bench} is not a registrable benchmark')
        elif bench._NICKNAME in cls._registered_bench:
            raise ValueError(
                f'{bench} has types that overlap with existing registered types: '
                f'{cls._registered_bench[bench._NICKNAME]}')

        cls._registered_bench[bench._NICKNAME] = bench

    @classmethod
    def get_bench_from_nickname(cls,
                                bench_nickname: str) -> Type[BaseBenchmark]:
        """
        등록된 벤치마크 클래스를 찾아온다.

        :raises ValueError: benchmon에 등록된 벤치마크 중에서 `bench_nickname` 을 찾을 수 없을 때

        :param bench_nickname: 찾을 벤치마크 타입
        :type bench_nickname: str
        :return: 찾아진 `bench_nickname` 의 클래스
        :rtype: typing.Type[benchmon.benchmark.base.BaseBenchmark]
        """
        if bench_nickname not in cls._registered_bench:
            raise ValueError(
                f'{bench_nickname} is not a registered benchmark type')

        return cls._registered_bench[bench_nickname]

    @classmethod
    def _waits(
        cls, iterable: Iterable[Union[Future, Coroutine]]
    ) -> Future[Tuple[Set[Future], Set[Future]]]:
        return asyncio.wait(tuple(iterable))

    def __init__(self, bench_config: _CFG_T, constraints: Tuple[_CST_T, ...],
                 monitors: Tuple[_MON_T, ...], pipeline: BasePipeline) -> None:
        self._bench_config = bench_config
        self._identifier = bench_config.identifier

        self._monitors = monitors
        self._constraints = constraints
        self._pipeline = pipeline
        self._context_variable = Context()

        # setup for logger
        self._log_path = bench_config.workspace / 'logs' / f'{bench_config.identifier}.log'

    async def start_and_pause(self, silent: bool = False) -> None:
        """
        벤치마크를 실행 하였다가, 실제로 벤치마크가 실행되어 프로세스가 생성되면 멈춘다.
        벤치마크를 실행하기 전에 :mod:`제약 <benchmon.benchmark.constraints>` 나 logger 등을 초기화 한다.

        .. note::
            * 한번 :meth:`start_and_pause` 를 통해 실행되었다가 정상 종료된 벤치마크의 경우,
              다시 한번 :meth:`start_and_pause` 를 통해 재활용할 수 있도록 구현 되어야 한다.

        :param silent: ``True`` 일 경우 화면에 출력되는 로그를 없앤다.
        :type silent: bool
        """
        self._remove_logger_handlers()

        # setup for loggers
        logger = logging.getLogger(self._identifier)

        privilege_cfg = PrivilegeConfig.of(self._context_variable).result

        with drop_privilege(privilege_cfg.user, privilege_cfg.group):
            self._log_path.parent.mkdir(parents=True, exist_ok=True)

            fh = logging.FileHandler(self._log_path, mode='w')
            fh.setFormatter(BaseBenchmark._FILE_FORMATTER)
            fh.setLevel(logging.DEBUG)
            logger.addHandler(fh)

        if not silent:
            stream_handler = logging.StreamHandler()
            formatter: ColoredFormatter = ColoredFormatter(
                f'%(asctime)s.%(msecs)03d [%(levelname)-8s] %(name)-{self._bench_config.width_in_log}s '
                f'$ %(message)s')
            stream_handler.setFormatter(formatter)
            logger.addHandler(stream_handler)

        # noinspection PyBroadException
        try:
            # initialize constraints & pipeline
            if len(self._constraints) != 0:
                await self._waits(
                    con.on_init(self._context_variable)
                    for con in self._constraints)
                logger.debug('Constraints are initialized')

            await self._pipeline.on_init(self._context_variable)
            logger.debug('Pipe is initialized')

            logger.info('Starting benchmark...')
            await self._start(self._context_variable)
            logger.info(f'The benchmark has started. pid : {self.pid}')

            logger.debug('Pausing benchmark...')
            self.pause()

        except asyncio.CancelledError:
            logger.debug('The task was cancelled')
            if self.is_running:
                await self.kill()

            await self._destroy()

        except Exception as e:
            logger.critical(
                f'The following errors occurred during startup : {e}')
            if self.is_running:
                await self.kill()

            await self._destroy()

    async def monitor(self) -> None:
        """
        이 객체가 이미 실행된 후에 등록된 모니터로부터 벤치마크의 모니터링을 시작한다.
        """
        logger = logging.getLogger(self._identifier)
        logger.info('Start monitoring...')

        monitoring_tasks: Optional[asyncio.Task] = None

        # noinspection PyBroadException
        try:
            if len(self._constraints) != 0:
                logger.debug('Initializing constraints...')
                await self._waits(
                    con.on_start(self._context_variable)
                    for con in self._constraints)
                logger.debug('Initialization of constraints is done')
            else:
                logger.debug(
                    'Skips initialization of constraints because there are no registered constraints'
                )

            logger.debug('Initializing monitors...')
            await self._waits(
                mon.on_init(self._context_variable) for mon in self._monitors)
            logger.debug('Initialization of monitors is done')

            logger.debug('Starting monitoring...')
            monitoring_tasks = asyncio.create_task(
                self._waits(
                    mon.monitor(self._context_variable)
                    for mon in self._monitors))

            logger.debug('Wait for benchmark or monitors to finish')
            await asyncio.wait((self.join(), monitoring_tasks),
                               return_when=asyncio.FIRST_COMPLETED)

            if self.is_running:
                logger.debug(
                    'Monitoring is complete first. Wait for the benchmark to finish.'
                )
                await self.join()
            else:
                logger.debug(
                    'The benchmark was completed first. Stopping monitor...')
                await self._waits(mon.stop() for mon in self._monitors)
                logger.debug('Waiting for monitors to finish...')
                await monitoring_tasks
                logger.debug('Monitors are complete')

            logger.info('The benchmark has been successfully completed.')

        except asyncio.CancelledError:
            logger.info('Monitoring has been canceled.')

            if self.is_running:
                logger.debug('Attempt to force termination of benchmark...')
                await self.kill()
                logger.debug('Benchmark has been aborted')

            if monitoring_tasks is not None and not monitoring_tasks.done():
                logger.debug('Stopping monitor...')
                await self._waits(mon.stop() for mon in self._monitors)
                logger.debug('Waiting for monitors to finish...')
                await monitoring_tasks
                logger.debug('Monitors are complete')

        except Exception as e:
            logger.critical(
                f'The following errors occurred during monitoring : {e}')

            if self.is_running:
                logger.debug(
                    'Attempt to force termination of benchmark due to error...'
                )
                await self.kill()
                logger.debug('Benchmark has been aborted')

            if monitoring_tasks is not None and not monitoring_tasks.done():
                logger.debug('Stopping monitor...')
                await self._waits(mon.stop() for mon in self._monitors)
                logger.debug('Waiting for monitors to finish...')
                await monitoring_tasks
                logger.debug('Monitors are complete')

        finally:
            logger.info('The benchmark has ended.')

            # destroy monitors
            logger.debug('Deinitializing monitors...')
            await self._waits(
                mon.on_end(self._context_variable) for mon in self._monitors)
            logger.debug('Deinitialization of monitors is finished.')
            logger.debug('Destroying monitors...')
            await self._waits(
                mon.on_destroy(self._context_variable)
                for mon in self._monitors)
            logger.debug('Monitor destruction is complete.')

            await self._destroy()

    async def _destroy(self) -> None:
        logger = logging.getLogger(self._identifier)

        # destroy constraints
        if len(self._constraints) != 0:
            logger.debug('Destroying constraints...')
            await self._waits(
                con.on_destroy(self._context_variable)
                for con in self._constraints)
            logger.debug('Constraint destruction is complete.')
        else:
            logger.debug(
                'Skips destruction of constraints because there are no registered constraints'
            )

        # destroy pipeline
        logger.debug('Deinitializing pipeline...')
        await self._pipeline.on_end(self._context_variable)
        logger.debug('Deinitialization of pipeline is finished.')
        logger.debug('Destroying pipeline...')
        await self._pipeline.on_destroy(self._context_variable)
        logger.debug('Pipeline destruction is complete.')

        self._remove_logger_handlers()

    @abstractmethod
    def pause(self) -> None:
        """
        벤치마크를 일시 정지한다.
        지원하지 않는 벤치마크 종류가 있을 수 있다.
        """
        logging.getLogger(self._identifier).info('pausing...')

    @abstractmethod
    def resume(self) -> None:
        """
        벤치마크를 재시작한다.
        지원하지 않는 벤치마크 종류가 있을 수 있다.
        """
        logging.getLogger(self._identifier).info('resuming...')

    @abstractmethod
    async def join(self) -> None:
        """ 벤치마크가 실행이 종료될 때 까지 이 메소드의 호출자의 실행을 멈춘다. """
        logging.getLogger(self._identifier).info('joining...')

    @abstractmethod
    async def _start(self, context: Context) -> None:
        """ 벤치마크별로 실제 벤치마크의 실행 방법을 서술한다. """
        pass

    @abstractmethod
    async def kill(self) -> None:
        """ 벤치마크의 실행을 멈추고 프로세스를 죽인다. """
        logging.getLogger(self._identifier).info('stopping...')

    def _remove_logger_handlers(self) -> None:
        logger = logging.getLogger(self._identifier)

        for handler in tuple(logger.handlers):  # type: logging.Handler
            logger.removeHandler(handler)
            handler.flush()
            handler.close()

    @property
    def identifier(self) -> str:
        """
        다른 벤치마크들과 이 벤치마크를 구분할 수 있는 유일한 문자열을 반환한다.

        :return: 다른 벤치마크들과 이 벤치마크를 구분할 수 있는 유일한 문자열
        :rtype: str
        """
        return self._identifier

    @property
    @abstractmethod
    def group_name(self) -> str:
        """
        cgroup이나 resctrl 등에서 쓰일 유니크한 문자열

        :return: cgroup이나 resctrl 등에서 쓰일 유니크한 문자열
        :rtype: str
        """
        pass

    @property
    @abstractmethod
    def is_running(self) -> bool:
        """
        벤치마크 프로세스가 실제로 실행중인지를 반환

        :return: 벤치마크 프로세스의 실제 실행 여부
        :rtype: bool
        """
        pass

    @property
    @abstractmethod
    def pid(self) -> Optional[int]:
        """
        벤치마크 프로세스의 PID를 반환한다.

        :return: 벤치마크 프로세스의 PID. 아직 PID가 없다면 ``None``.
        :rtype: typing.Optional[int]
        """
        pass

    @property
    def bench_config(self) -> _CFG_T:
        return self._bench_config

    @abstractmethod
    def all_child_tid(self) -> Tuple[int, ...]:
        pass
Code example #17
File: base.py Project: isac322/benchmon
    async def start_and_pause(self, silent: bool = False) -> None:
        """
        Launches the benchmark, then pauses it as soon as the benchmark process has actually been created.
        Before launching, initializes things such as :mod:`constraints <benchmon.benchmark.constraints>` and loggers.

        .. note::
            * A benchmark that was started via :meth:`start_and_pause` and terminated normally
              must be implemented so that it can be reused through another call to :meth:`start_and_pause`.

        :param silent: if ``True``, suppresses log output to the screen.
        :type silent: bool
        """
        self._remove_logger_handlers()

        # setup for loggers
        logger = logging.getLogger(self._identifier)

        privilege_cfg = PrivilegeConfig.of(self._context_variable).result

        with drop_privilege(privilege_cfg.user, privilege_cfg.group):
            self._log_path.parent.mkdir(parents=True, exist_ok=True)

            fh = logging.FileHandler(self._log_path, mode='w')
            fh.setFormatter(BaseBenchmark._FILE_FORMATTER)
            fh.setLevel(logging.DEBUG)
            logger.addHandler(fh)

        if not silent:
            stream_handler = logging.StreamHandler()
            formatter: ColoredFormatter = ColoredFormatter(
                f'%(asctime)s.%(msecs)03d [%(levelname)-8s] %(name)-{self._bench_config.width_in_log}s '
                f'$ %(message)s')
            stream_handler.setFormatter(formatter)
            logger.addHandler(stream_handler)

        # noinspection PyBroadException
        try:
            # initialize constraints & pipeline
            if self._constraints:
                await self._waits(
                    con.on_init(self._context_variable)
                    for con in self._constraints)
                logger.debug('Constraints are initialized')

            await self._pipeline.on_init(self._context_variable)
            logger.debug('Pipeline is initialized')

            logger.info('Starting benchmark...')
            await self._start(self._context_variable)
            logger.info(f'The benchmark has started. pid : {self.pid}')

            logger.debug('Pausing benchmark...')
            self.pause()

        except asyncio.CancelledError:
            logger.debug('The task was cancelled')
            if self.is_running:
                await self.kill()

            await self._destroy()

        except Exception as e:
            logger.critical(
                f'The following error occurred during startup: {e}',
                exc_info=e)
            if self.is_running:
                await self.kill()

            await self._destroy()
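
A minimal driver for this lifecycle might look like the following sketch; MyBenchmark is a hypothetical concrete subclass, which this excerpt does not show:

import asyncio


async def run_benchmark() -> None:
    bench = MyBenchmark(...)  # hypothetical concrete subclass of the base class above
    await bench.start_and_pause(silent=True)  # process exists but is paused
    bench.resume()                            # let it run
    await bench.join()                        # block until it finishes


asyncio.run(run_benchmark())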
Code example #18
class Benchmark:
    class _Decorators:
        @staticmethod
        def ensure_running(func: Callable[['Benchmark', Any], Any]):
            @functools.wraps(func)
            def decorator(self: 'Benchmark', *args, **kwargs):
                if not self.is_running:
                    raise RuntimeError(
                        f'The benchmark ({self._identifier}) has already ended or has never been invoked.'
                        ' Run the benchmark first by invoking `run()`!')
                return func(self, *args, **kwargs)

            return decorator

        @staticmethod
        def ensure_not_running(func: Callable[['Benchmark', Any], Any]):
            @functools.wraps(func)
            def decorator(self: 'Benchmark', *args, **kwargs):
                if self.is_running:
                    raise RuntimeError(
                        f'benchmark {self._bench_driver.pid} is already running.'
                    )
                return func(self, *args, **kwargs)

            return decorator

        @staticmethod
        def ensure_invoked(func: Callable[['Benchmark', Any], Any]):
            @functools.wraps(func)
            def decorator(self: 'Benchmark', *args, **kwargs):
                if not self._bench_driver.has_invoked:
                    raise RuntimeError(
                        f'benchmark {self._identifier} has never been invoked.')
                return func(self, *args, **kwargs)

            return decorator

    _file_formatter = ColoredFormatter(
        '%(asctime)s.%(msecs)03d [%(levelname)s] (%(funcName)s:%(lineno)d in %(filename)s) $ %(message)s'
    )
    _stream_formatter = ColoredFormatter(
        '%(asctime)s.%(msecs)03d [%(levelname)8s] %(name)14s $ %(message)s')

    def __init__(self,
                 identifier: str,
                 bench_config: BenchConfig,
                 perf_config: PerfConfig,
                 rabbit_mq_config: RabbitMQConfig,
                 workspace: Path,
                 logger_level: int = logging.INFO):
        self._identifier: str = identifier
        self._perf_config: PerfConfig = perf_config
        self._rabbit_mq_config: RabbitMQConfig = rabbit_mq_config

        self._bench_driver: BenchDriver = bench_config.generate_driver(
            identifier)
        self._perf: Optional[asyncio.subprocess.Process] = None
        self._end_time: Optional[float] = None

        perf_parent = workspace / 'perf'
        if not perf_parent.exists():
            perf_parent.mkdir()

        self._perf_csv: Path = perf_parent / f'{identifier}.csv'

        log_parent = workspace / 'logs'
        if not log_parent.exists():
            log_parent.mkdir()

        self._log_path: Path = log_parent / f'{identifier}.log'

        # setup for loggers

        logger = logging.getLogger(self._identifier)
        logger.setLevel(logger_level)

        metric_logger = logging.getLogger(f'{self._identifier}-rabbitmq')
        metric_logger.setLevel(logger_level)

    @_Decorators.ensure_not_running
    async def start_and_pause(self, print_log: bool = False):
        self._remove_logger_handlers()

        # setup for loggers

        logger = logging.getLogger(self._identifier)

        fh = logging.FileHandler(self._log_path, mode='w')
        fh.setFormatter(Benchmark._file_formatter)
        logger.addHandler(fh)

        if print_log:
            stream_handler = logging.StreamHandler()
            stream_handler.setFormatter(Benchmark._stream_formatter)
            logger.addHandler(stream_handler)

        # launching benchmark
        logger.info('Starting benchmark...')
        await self._bench_driver.run()
        logger.info(
            f'The {self._bench_driver.wl_type} benchmark has started. pid : {self._bench_driver.pid}'
        )

        self._pause_bench()

    @_Decorators.ensure_running
    async def monitor(self, print_metric_log: bool = False):
        logger = logging.getLogger(self._identifier)

        try:
            # launching perf
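            # `perf stat` with `-x ,` prints machine-readable CSV records and
            # `-I <ms>` repeats them every interval; both go to stderr, which
            # is what the polling loop below reads line by line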
            self._perf = await asyncio.create_subprocess_exec(
                'perf',
                'stat',
                '-e',
                self._perf_config.event_str,
                '-p',
                str(self._bench_driver.pid),
                '-x',
                ',',
                '-I',
                str(self._perf_config.interval),
                stderr=asyncio.subprocess.PIPE)

            # setup for metric logger

            rabbit_mq_handler = RabbitMQHandler(self._rabbit_mq_config,
                                                self._bench_driver.name,
                                                self._bench_driver.wl_type,
                                                self._bench_driver.pid,
                                                self._perf.pid,
                                                self._perf_config.interval)
            rabbit_mq_handler.setFormatter(
                RabbitMQFormatter(self._perf_config.event_names))

            metric_logger = logging.getLogger(f'{self._identifier}-rabbitmq')
            metric_logger.addHandler(rabbit_mq_handler)

            if print_metric_log:
                metric_logger.addHandler(logging.StreamHandler())

            with self._perf_csv.open('w') as fp:
                # print csv header
                fp.write(','.join(
                    chain(self._perf_config.event_names,
                          ('wall_cycles', 'llc_size', 'local_mem',
                           'remote_mem'))) + '\n')
            metric_logger.addHandler(logging.FileHandler(self._perf_csv))

            # perf polling loop

            num_of_events = len(self._perf_config.events)
            if self._perf_config.interval < 100:
                proc = await asyncio.create_subprocess_exec(
                    'perf',
                    '--version',
                    stdout=asyncio.subprocess.PIPE,
                    stderr=asyncio.subprocess.DEVNULL)
                version_line, _ = await proc.communicate()
                version_str = version_line.decode().split()[2]  # type: str
                major, minor = map(
                    int,
                    version_str.split('.')[:2])  # type: int, int
                if (major, minor) < (4, 17):
                    # remove warning message of perf from buffer
                    await self._perf.stderr.readline()

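            # snapshot the TSC and resctrl counters so every sample below can
            # be logged as a delta over the preceding interval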
            prev_tsc = rdtsc.get_cycles()
            _, prev_local_mem, prev_total_mem = await self._bench_driver.read_resctrl()
            while self._bench_driver.is_running and self._perf.returncode is None:
                record = []
                ignore_flag = False

                for _ in range(num_of_events):
                    raw_line = await self._perf.stderr.readline()

                    line = raw_line.decode().strip()
                    try:
                        value = line.split(',')[1]
                        float(value)
                        record.append(value)
                    except (IndexError, ValueError) as e:
                        ignore_flag = True
                        logger.debug(
                            f'a line printed by perf was ignored due to the following exception: {e}'
                            f' and the line is: {line}')

                tmp = rdtsc.get_cycles()
                record.append(str(tmp - prev_tsc))
                prev_tsc = tmp

                llc_occupancy, local_mem, total_mem = await self._bench_driver.read_resctrl()
                record.append(str(llc_occupancy))

                cur_local_mem = local_mem - prev_local_mem
                record.append(str(cur_local_mem))
                prev_local_mem = local_mem

                record.append(
                    str(max(total_mem - prev_total_mem - cur_local_mem, 0)))
                prev_total_mem = total_mem

                if not ignore_flag:
                    metric_logger.info(','.join(record))

            logger.info('end of monitoring loop')

            self._kill_perf()

        except CancelledError as e:
            logger.debug(f'The task was cancelled: {e}')
            self._stop()

        finally:
            try:
                self._kill_perf()
                self._bench_driver.stop()
            except (psutil.NoSuchProcess, ProcessLookupError):
                pass

            await self._bench_driver.cleanup()
            logger.info('The benchmark has ended.')
            self._remove_logger_handlers()
            self._end_time = time.time()

    def _pause_bench(self):
        logging.getLogger(self._identifier).info('pausing...')

        self._bench_driver.pause()

    def pause(self):
        self._pause_bench()
        # guard against perf not having been launched (or already exited)
        if self._perf is not None and self._perf.returncode is None:
            self._perf.send_signal(SIGSTOP)

    @_Decorators.ensure_running
    def resume(self):
        logging.getLogger(self._identifier).info('resuming...')

        self._bench_driver.resume()
        if self._perf is not None and self._perf.returncode is None:
            self._perf.send_signal(SIGCONT)

    def _kill_perf(self):
        if self._perf is not None and self._perf.returncode is None:
            self._perf.kill()
        self._perf = None

    def _stop(self):
        logger = logging.getLogger(self._identifier)
        logger.info('stopping...')

        try:
            self._kill_perf()
            self._bench_driver.stop()
        except (psutil.NoSuchProcess, ProcessLookupError) as e:
            logger.debug(f'Process already killed : {e}')

    def _remove_logger_handlers(self):
        logger = logging.getLogger(self._identifier)
        metric_logger = logging.getLogger(f'{self._identifier}-rabbitmq')

        for handler in tuple(metric_logger.handlers):  # type: Handler
            logger.debug(f'removing metric handler {handler}')
            metric_logger.removeHandler(handler)
            try:
                handler.flush()
                handler.close()
            except Exception:
                logger.exception(
                    'Exception occurred while removing a handler from the metric logger.'
                )

        for handler in tuple(logger.handlers):  # type: Handler
            logger.removeHandler(handler)
            handler.flush()
            handler.close()

    @property
    @_Decorators.ensure_invoked
    def launched_time(self) -> float:
        return self._bench_driver.created_time

    @property
    def identifier(self) -> str:
        return self._identifier

    @property
    def end_time(self) -> Optional[float]:
        return self._end_time

    @property
    def runtime(self) -> Optional[float]:
        if self._end_time is None:
            return None
        elif self._end_time < self.launched_time:
            return None
        else:
            return self._end_time - self.launched_time

    @property
    def is_running(self) -> bool:
        return self._bench_driver.is_running and (
            self._perf is None or self._perf.returncode is None)
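
Assuming already-built BenchConfig, PerfConfig, and RabbitMQConfig instances (their construction is not part of this excerpt), driving the class above could look roughly like this sketch:

import asyncio
from pathlib import Path


async def run_one(identifier, bench_cfg, perf_cfg, mq_cfg):
    bench = Benchmark(identifier, bench_cfg, perf_cfg, mq_cfg,
                      workspace=Path('workspace'))
    await bench.start_and_pause(print_log=True)

    # start monitoring, then un-pause the benchmark so the loop sees activity
    monitor_task = asyncio.create_task(bench.monitor())
    bench.resume()
    await monitor_task

    if bench.runtime is not None:
        print(f'{bench.identifier} ran for {bench.runtime:.1f} s')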
Code example #19
ap.add_argument("-o",
                "--orgs",
                nargs='+',
                required=True,
                help="List of organizations, separated by space")
args = vars(ap.parse_args())

ACCESS_TOKEN = args['token']
WORKING_DIR = os.path.expanduser(args['directory'])
LOG_FILE_PATH = os.path.join(WORKING_DIR, 'backup.log')
org_list = args['orgs']
if not os.path.exists(WORKING_DIR):
    os.makedirs(WORKING_DIR)

LOG_FORMAT = '%(name)s - %(levelname)s - %(message)s'
formatter = ColoredFormatter(LOG_FORMAT)
stream = TqdmHandler()
stream.setLevel(logging.INFO)
stream.setFormatter(formatter)

file_handler = WatchedFileHandler(filename=LOG_FILE_PATH)
file_handler.setFormatter(formatter)

logging.basicConfig(level=logging.INFO,
                    format=LOG_FORMAT,
                    handlers=[stream, file_handler])
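
TqdmHandler is used above but not defined in this excerpt. A minimal sketch, assuming the tqdm package, routes records through tqdm.write() so log lines do not break an active progress bar:

import logging
from tqdm import tqdm


class TqdmHandler(logging.Handler):
    """Logging handler that plays nicely with tqdm progress bars."""

    def emit(self, record: logging.LogRecord) -> None:
        try:
            # tqdm.write() prints above any active progress bar
            tqdm.write(self.format(record))
        except Exception:
            self.handleError(record)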


def make_archive(source, destination):
    base = os.path.basename(destination)
    name, ext = os.path.splitext(base)
Code example #20
File: logger.py Project: BenjaminDavison/snowshu
    def _construct_warning_formatter(self) -> ColoredFormatter:
        return ColoredFormatter(fmt=LOGGING_CLI_WARNING_FORMAT,
                                datefmt=LOGGING_DATE_FORMAT,
                                level_styles=self._colored_log_level_styles())
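
coloredlogs expects level_styles to map lower-cased level names to style dictionaries. _colored_log_level_styles is not shown in this excerpt; a plausible sketch of what it returns:

    def _colored_log_level_styles(self) -> dict:
        # keys are lower-cased level names; values are coloredlogs style dicts
        return {
            'debug': {'color': 'green'},
            'info': {},
            'warning': {'color': 'yellow'},
            'error': {'color': 'red'},
            'critical': {'color': 'red', 'bold': True},
        }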
Code example #21
import logging
import logging.handlers
import subprocess

try:
    from coloredlogs import ColoredFormatter
except ImportError:
    ColoredFormatter = logging.Formatter

from configuration import LOG_DIR
from pmc_turbo import root_dir

long_message_format = '%(levelname)-8.8s %(asctime)s - %(name)s.%(funcName)s:%(lineno)d  %(message)s'
short_message_format = '%(levelname)-4.4s %(asctime)s - %(funcName)s:%(lineno)d  %(message)s'
default_handler = logging.StreamHandler()
default_formatter = ColoredFormatter(short_message_format)
long_formatter = logging.Formatter(long_message_format)
default_handler.setFormatter(default_formatter)

pmc_turbo_logger = logging.getLogger('pmc_turbo')
pmc_turbo_logger.setLevel(logging.DEBUG)

KNOWN_LOGGERS = {
    'pipeline',
    'communicator',
    'controller',
    'gse_receiver',
}


def setup_stream_handler(level=logging.INFO):
Code example #22
from logging import ERROR, INFO, StreamHandler, basicConfig, getLogger
from os import getcwd
from shutil import copyfile

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from coloredlogs import ColoredFormatter
from redis.exceptions import ResponseError
from yaml import FullLoader, load

persistent_vars = {}
module_dir = __path__[0]
working_dir = getcwd()
config = None
help_messages = {}
scheduler = AsyncIOScheduler()
if not scheduler.running:
    scheduler.configure(timezone="Asia/Shanghai")
    scheduler.start()
version = 0.1
logs = getLogger(__name__)
logging_format = "%(levelname)s [%(asctime)s] [%(name)s] %(message)s"
logging_handler = StreamHandler()
logging_handler.setFormatter(ColoredFormatter(logging_format))
root_logger = getLogger()
root_logger.setLevel(ERROR)
root_logger.addHandler(logging_handler)
basicConfig(level=INFO)
logs.setLevel(INFO)

try:
    config = load(open(r"config.yml"), Loader=FullLoader)
except FileNotFoundError:
    logs.fatal(
        "The configuration file does not exist, and a new configuration file is being generated."
    )
    copyfile(f"{module_dir}/assets/config.gen.yml", "config.yml")
    exit(1)
Code example #23
File: logs.py Project: c0ntrol-x/OldSpeak
import logging
import os
import sys

from coloredlogs import ColoredFormatter
# JsonFormatter is assumed to come from the python-json-logger package
from pythonjsonlogger.jsonlogger import JsonFormatter


def configure_logging(level_name, stream=None, filename=None):
    relevant_loggers = [
        logging.getLogger(),
        logging.getLogger('oldspeak'),
        logging.getLogger('sqlalchemy'),
        logging.getLogger('werkzeug'),
    ]
    available_handlers = []

    logging.Logger.manager.loggerDict.clear()
    TEST_MODE = os.getenv('TEST_MODE')
    TEST_NOLOGS = TEST_MODE in ['nologs', 'no-logs']

    if TEST_NOLOGS:
        stream = None
        filename = None
        level_name = 'WARNING'
    else:
        stream = sys.stderr
        filename = None
        level_name = 'DEBUG'

    # resolve the numeric level only after level_name has been finalized
    level = getattr(logging, level_name.upper(), logging.INFO)

    stream_handler = None
    file_handler = None

    if stream:
        stream_handler = logging.StreamHandler(stream=stream)
        available_handlers.append(stream_handler)

    if filename:
        file_handler = logging.FileHandler(filename=filename, encoding='utf-8')
        available_handlers.append(file_handler)

    if not stream and not filename:
        available_handlers.append(logging.NullHandler())

    def setup_logger(logger):
        logger.handlers = []
        logger.setLevel(level=level)
        for handler in available_handlers:
            logger.addHandler(handler)

    if stream_handler:
        stream_handler.setLevel(level)
        stream_handler.setFormatter(
            ColoredFormatter(
                fmt='%(asctime)s %(name)s %(levelname)s %(message)s'))

    if file_handler:
        json_formatter = JsonFormatter(
            '%(levelname)s %(asctime)s %(module)s %(process)d %(message)s %(pathname)s %(lineno)d %(funcName)s'
        )

        file_handler.setLevel(level)
        file_handler.setFormatter(json_formatter)

    for logger in relevant_loggers:
        setup_logger(logger)
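
A usage sketch; note that when TEST_MODE is set in the environment, the branches above override the stream and filename arguments:

configure_logging('INFO', stream=sys.stderr)
logging.getLogger('oldspeak').info('logging configured')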