Example #1
import logging


class logger:
    Filt = logging.Filter("something_but_not_empty")
    newConsoleFormat = ('%(message)s')
    defaultConsoleFormat = ('%(name)-15s:: %(message)s')
    colorized_log_messages = True
    colors = {
        'warning': '\033[33m',
        'info': '\033[0m',
        'debug': '\033[36m',
        'error': '\033[31m',
        'reset': '\033[1;0m',
        'start_stop': '\033[32m'
    }

    def __init__(self, name=""):
        self.loggerName = name
        self.log1 = logging.getLogger(self.loggerName)

    def emptyLine(self):
        self.rootLogger = logging.getLogger()
        self.rootLogger.handlers[0].setFormatter(
            logging.Formatter(self.newConsoleFormat))
        self.rootLogger.handlers[1].addFilter(self.Filt)
        self.log1.error("")
        self.rootLogger.handlers[0].setFormatter(
            logging.Formatter(self.defaultConsoleFormat))
        self.rootLogger.handlers[1].removeFilter(self.Filt)

    def debug(self, msg):
        self.message = msg
        if self.colorized_log_messages:
            self.log1.debug(self.colors['debug'] + self.message +
                            self.colors['reset'])
        else:
            self.log1.debug(self.message)

    def info(self, msg):
        self.message = msg
        if self.colorized_log_messages:
            self.log1.info(self.colors['info'] + self.message +
                           self.colors['reset'])
        else:
            self.log1.info(self.message)

    def warning(self, msg):
        self.message = msg
        if self.colorized_log_messages:
            self.log1.warning(self.colors['warning'] + self.message +
                              self.colors['reset'])
        else:
            self.log1.warning(self.message)

    def error(self, msg):
        self.message = msg
        if self.colorized_log_messages:
            self.log1.error(self.colors['error'] + self.message +
                            self.colors['reset'])
        else:
            self.log1.error(self.message)

    def exception(self, msg):
        self.message = msg
        if self.colorized_log_messages:
            self.log1.error(self.colors['error'] + self.message +
                            self.colors['reset'])
        else:
            self.log1.error(self.message)

    def start_stop(self, msg, startup_status="DONE"):
        self.message = msg
        self.startup_status = startup_status
        self.rootLogger = logging.getLogger()
        #self.Filt = logging.Filter("somthing_but_not_empty")
        self.rootLogger.handlers[1].addFilter(self.Filt)
        if self.colorized_log_messages:
            self.log1.warning(self.message + '\033[40G[' +
                              self.colors['start_stop'] + '  ' +
                              self.startup_status + '  ' +
                              self.colors['reset'] + ']')
        else:
            self.log1.warning(self.message + '\033[40G[  ' +
                              self.startup_status + '  ]')
        self.rootLogger.handlers[1].removeFilter(self.Filt)

    def fwstatus(self, msg):
        self.message = msg
        self.rootLogger = logging.getLogger()
        self.rootLogger.handlers[1].addFilter(self.Filt)
        self.rootLogger.handlers[0].setFormatter(
            logging.Formatter(self.newConsoleFormat))
        for line in self.message:
            if self.colorized_log_messages:
                if "Chain " in line:  #print line in green if it contains "Chain ".
                    self.log1.warning(self.colors['start_stop'] + line +
                                      self.colors['reset'])
                else:
                    self.log1.warning(line)
            else:
                self.log1.warning(line)
        self.rootLogger.handlers[1].removeFilter(self.Filt)
        self.rootLogger.handlers[0].setFormatter(
            logging.Formatter(self.defaultConsoleFormat))

    def colorizeMessages(self, x):
        self.colorized_log_messages = bool(x)
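
The helper methods above index rootLogger.handlers[0] and rootLogger.handlers[1] directly, so the class assumes a root logger that already carries exactly two handlers in a fixed order: console first, then something like a file handler. A minimal setup sketch under that assumption; the handler order, file name, and messages are illustrative, not from the source:

# Continuing the example above: build the two-handler root logger the class expects.
console = logging.StreamHandler()
console.setFormatter(logging.Formatter('%(name)-15s:: %(message)s'))
filelog = logging.FileHandler('app.log')
filelog.setFormatter(logging.Formatter('%(asctime)s %(name)s %(message)s'))

root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(console)   # handlers[0]: console, gets reformatted for blank lines
root.addHandler(filelog)   # handlers[1]: file, gets the "drop this record" filter

log = logger('myapp')
log.info('service configured')
log.start_stop('Starting service')  # prints the message plus a green "[  DONE  ]" status column
log.emptyLine()                     # blank line on the console only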
Example #2
    def __init__(self, name):
        self.orig_filter = logging.Filter(name)
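
Only the constructor survives in this fragment. A sketch of the delegating filter it plausibly belongs to (the class name and the filter method are assumptions; any object with a filter(record) method works as a logging filter):

import logging

class DelegatingFilter:  # hypothetical name
    def __init__(self, name):
        self.orig_filter = logging.Filter(name)

    def filter(self, record):
        # Defer the keep/drop decision to the wrapped stdlib Filter.
        return self.orig_filter.filter(record)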
Example #3
import logging
import os
import time


def make_logger(name,
                default_settings,
                level=None,
                write=None,
                log_format=None,
                file_name=None,
                directory=None,
                custom_fields_fn=None,
                logger=None):
    # log_queue = queue.Queue(-1)  # no limit on size
    # queue_handler = handlers.QueueHandler(log_queue)

    if level is None:
        level = default_settings["level"]

    print_handle = logging.StreamHandler()
    print_handle.setLevel(level)

    # listener = handlers.QueueListener(log_queue, print_handle)

    if logger is None:
        logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)

    # logger.addHandler(queue_handler)

    if log_format is None:
        log_format = default_settings["log_format"]

    if write is None:
        write = default_settings["write"]

    # give the string format to the logger to use for formatting messages
    formatter = logging.Formatter(log_format)
    print_handle.setFormatter(formatter)
    logger.addHandler(print_handle)

    if custom_fields_fn is None:
        custom_fields_fn = default_settings["custom_fields_fn"]

    if custom_fields_fn is not None:
        # add custom fields (by default a version field is added)
        stream_filter = logging.Filter()
        stream_filter.filter = custom_fields_fn
        logger.addFilter(stream_filter)

    # initialize a default log file name and directory if none are specified
    if file_name is None:
        file_name = default_settings["file_name"]

        if directory is None:
            # only use default if both directory and file_name are None.
            # Assume file_name has the full path if directory is None
            directory = default_settings["directory"]

    if "%" in file_name:
        file_name = time.strftime(file_name)

    directory_parts = []
    directory = os.path.normpath(directory)
    for part in directory.split(os.sep):
        if "%(name)s" in part:
            directory_parts.append(part % dict(name=name))

        elif "%" in directory:
            directory_parts.append(time.strftime(part))

        else:
            directory_parts.append(part)

    directory = os.path.join(*directory_parts)

    # make directory if writing a log, if directory evaluates True, and if the directory doesn't exist
    if write and directory and not os.path.isdir(directory):
        os.makedirs(directory)
        logger.debug("Creating log directory: %s" % directory)

    # if writing a log, initialize the logging file handle
    if write:
        log_path = os.path.join(directory, file_name)
        file_handle = logging.FileHandler(log_path, "w+")
        file_handle.setLevel(logging.DEBUG)
        file_handle.setFormatter(formatter)
        logger.addHandler(file_handle)

        logger.debug("Logging to: %s" % log_path)

    # listener.start()
    return logger, file_name, directory
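
A hedged usage sketch (the concrete values are assumptions; the required default_settings keys are read straight from the lookups above: level, write, log_format, custom_fields_fn, file_name, directory):

import logging

default_settings = {
    "level": logging.INFO,
    "write": True,
    "log_format": "[%(asctime)s] %(name)s %(levelname)s: %(message)s",
    "file_name": "%Y-%m-%d.log",   # contains '%', so it is strftime-expanded
    "directory": "logs/%(name)s",  # '%(name)s' is replaced with the logger name
    "custom_fields_fn": None,
}

logger, file_name, directory = make_logger("demo", default_settings)
logger.info("hello")  # printed to the console and written to logs/demo/<date>.log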
Example #4
def run(kwargs):
    global PROFILER
    mpi.init_mpi()
    np.set_printoptions(threshold=sys.maxsize)
    process_defaults(kwargs)

    log_info = kwargs['log_info']
    log_files = kwargs['log_files']

    if log_info != '':
        log_files = list(filter(None, log_files.split(':')))

        # Create formatter
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        # Configure root logger
        root_logger = logging.getLogger()
        numeric_level = getattr(logging, log_info.upper(), None)
        if not isinstance(numeric_level, int):
            raise ValueError('Invalid log level: %s' % log_info)
        root_logger.setLevel(numeric_level)

        # Create handlers
        for log_file in log_files:
            handler = logging.StreamHandler()
            handler.setLevel(logging.DEBUG)
            handler.setFormatter(formatter)
            handler.addFilter(logging.Filter(name=log_file))
            root_logger.addHandler(handler)

    base_path = kwargs['base_path']
    fail_on_duplicate = kwargs['fail_on_duplicate']
    continue_run = bool(kwargs['continue'])

    kwargs['cell_trajectories_file'] = ''
    if continue_run:
        assert kwargs['expl_state'] is None
        assert kwargs['load_path'] == ''
        assert kwargs['cell_trajectories_file'] == ''
        if os.path.exists(base_path):
            path_to_load, num = find_checkpoint(base_path)
            if path_to_load is not None and num is not None:
                kwargs['expl_state'] = os.path.join(
                    path_to_load, num + ARCHIVE_POSTFIX + compress_suffix)
                kwargs['load_path'] = os.path.join(path_to_load,
                                                   num + MODEL_POSTFIX)
                kwargs['cell_trajectories_file'] = os.path.join(
                    path_to_load, num + TRAJ_POSTFIX)
                local_logger.info(
                    f'Successfully loading from checkpoint: {kwargs["expl_state"]} {kwargs["load_path"]} '
                    f'{kwargs["cell_trajectories_file"]}')
        if kwargs['expl_state'] is None or kwargs['load_path'] == '':
            kwargs['expl_state'] = None
            kwargs['load_path'] = ''
            kwargs['cell_trajectories_file'] = ''
            local_logger.warning(
                f'No checkpoint found in: {kwargs["base_path"]}; starting a new run.'
            )

    if os.path.exists(base_path) and fail_on_duplicate:
        raise Exception('Experiment: ' + base_path + ' already exists!')

    # We need to setup the MPI environment before performing any data processing
    nb_cpu = 2
    session, master_seed = hrv_and_tf_init(nb_cpu, kwargs['nb_envs'],
                                           kwargs['seed'])
    with session.as_default():
        worker_seed_start = master_seed + 1
        kwargs['seed'] = worker_seed_start

        # Process load path
        kwargs['model_path'] = kwargs['load_path']

        # Process profile
        profile = kwargs['profile']

        # Only one process should write information about our experiment
        if hvd.rank() == 0:
            cur_id = 0
            if os.path.exists(base_path):
                current = glob.glob(base_path + '/*')
                for c in current:
                    try:
                        idx, _ = c.split('/')[-1].split('_')
                        idx = int(idx)
                        if idx >= cur_id:
                            cur_id = idx + 1
                    except ValueError:
                        pass
                    except IndexError:
                        pass
            base_path = f'{base_path}/{cur_id:04d}_{uuid.uuid4().hex}/'
            os.makedirs(base_path, exist_ok=True)

            info = copy.copy(kwargs)
            info['version'] = VERSION
            code_hash = get_code_hash()
            info['code_hash'] = code_hash
            local_logger.info(f'Code hash: {code_hash}')
            json.dump(info, open(base_path + '/kwargs.json', 'w'), indent=4)
            kwargs['base_path'] = base_path
            local_logger.info(f'Experiment running in {base_path}')
        else:
            base_path = None
        base_path = mpi.COMM_WORLD.bcast(base_path, root=0)
        kwargs['base_path'] = base_path

        if profile:
            PROFILER = cProfile.Profile()
            PROFILER.enable()

        temp_dir = tempfile.mkdtemp(prefix='tmp_goexplore_')
        kwargs['temp_dir'] = temp_dir
        try:
            _run(**kwargs)
            if PROFILER is not None:
                PROFILER.disable()
        finally:
            try:
                # delete directory
                shutil.rmtree(temp_dir)
            except OSError as exc:
                # ENOENT - no such file or directory
                if exc.errno != errno.ENOENT:
                    raise
            if PROFILER is not None:
                PROFILER.print_stats()
Example #5
def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.options is not None:
        cfg.merge_from_dict(args.options)

    # set cudnn_benchmark
    if cfg.get("cudnn_benchmark", False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get("work_dir", None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join("./work_dirs",
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer["lr"] = cfg.optimizer["lr"] * len(cfg.gpu_ids) / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == "none":
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    log_file = osp.join(cfg.work_dir, f"{timestamp}.log")
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)

    # add a logging filter
    logging_filter = logging.Filter("mmdet")
    logging_filter.filter = lambda record: record.name.find("mmdet") != -1

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = "\n".join([(f"{k}: {v}") for k, v in env_info_dict.items()])
    dash_line = "-" * 60 + "\n"
    logger.info("Environment info:\n" + dash_line + env_info + "\n" +
                dash_line)
    meta["env_info"] = env_info

    # log some basic info
    logger.info(f"Distributed training: {distributed}")
    # logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    if args.seed is not None:
        logger.info(f"Set random seed to {args.seed}, "
                    f"deterministic: {args.deterministic}")
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta["seed"] = args.seed

    model = build_detector(cfg.model,
                           train_cfg=cfg.get("train_cfg"),
                           test_cfg=cfg.get("test_cfg"))

    # logger.info(f'Model:\n{model}')

    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=__version__,
            config=cfg.pretty_text,
            CLASSES=datasets[0].CLASSES,
        )
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES

    train_detector(
        model,
        datasets,
        cfg,
        args,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta,
    )
Example #6
    def configure(self, options):
        self.options = options

        if options.timing:
            # --timing implies --verbose
            options.verbose = max(options.verbose, 1)

        log_level = logging.INFO
        if options.quiet:
            log_level = logging.WARNING
        elif options.verbose == 2:
            log_level = logging.DEBUG

        self.meter = MeteredStream(
            self.stream, (options.verbose == 2),
            number_of_columns=SystemHost().platform.terminal_width())

        handler = logging.StreamHandler(self.stream)
        # We constrain the level on the handler rather than on the root
        # logger itself.  This is probably better because the handler is
        # configured and known only to this module, whereas the root logger
        # is an object shared (and potentially modified) by many modules.
        # Modifying the handler, then, is less intrusive and less likely to
        # interfere with modifications made by other modules (e.g. in unit
        # tests).
        handler.name = __name__
        handler.setLevel(log_level)
        formatter = logging.Formatter("%(message)s")
        handler.setFormatter(formatter)

        logger = logging.getLogger()
        logger.addHandler(handler)
        logger.setLevel(logging.NOTSET)

        # Filter out most webkitpy messages.
        #
        # Messages can be selectively re-enabled for this script by updating
        # this method accordingly.
        def filter_records(record):
            """Filter out autoinstall and non-third-party webkitpy messages."""
            # FIXME: Figure out a way not to use strings here, for example by
            #        using syntax like webkitpy.test.__name__.  We want to be
            #        sure not to import any non-Python 2.4 code, though, until
            #        after the version-checking code has executed.
            if (record.name.startswith("webkitpy.common.system.autoinstall")
                    or record.name.startswith("webkitpy.test")):
                return True
            if record.name.startswith("webkitpy"):
                return False
            return True

        testing_filter = logging.Filter()
        testing_filter.filter = filter_records

        # Display a message so developers are not mystified as to why
        # logging does not work in the unit tests.
        _log.info(
            "Suppressing most webkitpy logging while running unit tests.")
        handler.addFilter(testing_filter)

        if self.options.pass_through:
            # FIXME: Can't import at top of file, as outputcapture needs unittest2
            from webkitpy.common.system import outputcapture
            outputcapture.OutputCapture.stream_wrapper = _CaptureAndPassThroughStream
Example #7
    for key in cachet_mandatory_fields:
        if key not in config_data["cachet"]:
            fatal_error("Missing cachet mandatory fields")


def fatal_error(message: str):
    logging.getLogger("cachet_url_monitor.scheduler").fatal("%s", message)
    sys.exit(1)


if __name__ == "__main__":
    FORMAT = "%(levelname)9s [%(asctime)-15s] %(name)s - %(message)s"
    logging.basicConfig(format=FORMAT, level=logging.INFO)
    for handler in logging.root.handlers:
        handler.addFilter(logging.Filter("cachet_url_monitor"))

    if len(sys.argv) <= 1:
        fatal_error("Missing configuration file argument")
        sys.exit(1)

    try:
        config_data = load(open(sys.argv[1], "r"), SafeLoader)
    except FileNotFoundError:
        fatal_error(f"File not found: {sys.argv[1]}")
        sys.exit(1)

    validate_config()

    webhooks: List[Webhook] = []
    for webhook in config_data.get("webhooks", []):
Example #8
def setup_logger(name=__name__, headless=False):
    """setup logging

    Overriding the default log level (**DEBUG**) can be done via the
    environment variable `UCI_LOGLEVEL`

    Available levels:

    * CRITICAL
    * ERROR
    * WARNING
    * INFO
    * DEBUG

    .. note::

        This filters only cloudinstall logging info. Set the environment
        variable `UCI_NOFILTER` to see debug log statements from imported
        libraries (i.e. macumba)

    .. code::

        # Running cloud-status from cli
        $ UCI_LOGLEVEL=INFO openstack-status

        # Disable log filtering
        $ UCI_NOFILTER=1 openstack-status

    :param str name: logger name
    :returns: a log object

    """
    HOME = os.getenv('HOME')
    CONFIG_DIR = '.cloud-install'
    CONFIG_PATH = os.path.join(HOME, CONFIG_DIR)
    if not os.path.isdir(CONFIG_PATH):
        os.makedirs(CONFIG_PATH)
    LOGFILE = os.path.join(CONFIG_PATH, 'commands.log')
    commandslog = TimedRotatingFileHandler(LOGFILE,
                                           when='D',
                                           interval=1,
                                           backupCount=7)
    env = os.environ.get('UCI_LOGLEVEL', 'DEBUG')

    commandslog.setLevel(env)
    commandslog.setFormatter(logging.Formatter(
        "[%(levelname)-4s: %(asctime)s, "
        "%(filename)s:%(lineno)d] %(message)s",
        datefmt='%m-%d %H:%M:%S'))

    if headless:
        consolelog = logging.StreamHandler()
        consolelog.setLevel(logging.INFO)
        consolelog.setFormatter(logging.Formatter(
            '[%(levelname)-4s: %(asctime)s] %(message)s',
            datefmt='%m-%d %H:%M:%S'))

    logger = logging.getLogger('')
    logger.setLevel(env)

    no_filter = os.environ.get('UCI_NOFILTER', None)
    if no_filter is None:
        f = logging.Filter(name='cloudinstall')
        commandslog.addFilter(f)
        if headless:
            consolelog.addFilter(f)
    logger.addHandler(commandslog)
    if headless:
        logger.addHandler(consolelog)

    return logger
Example #9
def setup_log(name):
    """
    初始化日志器
    :param name: 调用者, 期待 __name__
    :return: 直接可用的日志器, 包含控制台输出[除 ERROR 的所有日志]/ALL 文件输出[每日更新]/ERROR 文件输出[大小更新]
    """
    log_dir_check()

    def should_log(record):
        """
        定义日志过滤规则
        :param record: 日志信息,拥有日志的自有属性,如 lineno
        :return: True or False
        """
        if record.levelname not in ["INFO", "WARNING"]:
            return False
        return True

    # Initialize a clean logger
    logger = logging.getLogger(name)

    logger.setLevel(level=logging.INFO)

    # Create the log record format: level, source module/function, line number, message
    _ = '%(asctime)s - %(levelname)s - %(module)s:%(funcName)s - lineno:%(lineno)d - %(message)s'
    formatter = logging.Formatter(_)

    # Create three handlers: console output, ALL file output, and ERROR file output.
    # The file handlers specify the log path, the cap on kept log files, and each handler's own properties.
    console = logging.StreamHandler()
    all_handler = TimedRotatingFileHandler("logs/all/all_log.log",
                                           when='midnight',
                                           interval=1,
                                           backupCount=10)
    error_handler = RotatingFileHandler("logs/error/error_log.log",
                                        maxBytes=1024 * 1024 * 100,
                                        backupCount=10)

    # Configure the handlers' levels
    console.setLevel(logging.INFO)
    all_handler.setLevel(logging.INFO)
    error_handler.setLevel(logging.ERROR)

    # Set the record format on the handlers just created
    console.setFormatter(formatter)
    all_handler.setFormatter(formatter)
    error_handler.setFormatter(formatter)

    # Initialize the log filter and attach it to the chosen handler
    logging_filter = logging.Filter()
    logging_filter.filter = should_log
    console.addFilter(logging_filter)

    # Set the TimedRotatingFileHandler suffix; it takes the same format as strftime
    all_handler.suffix = "%Y-%m-%d_%H-%M-%S.log"

    # Attach the handlers to the global logging object (the one the Flask app uses)
    logger.addHandler(console)
    logger.addHandler(all_handler)
    logger.addHandler(error_handler)

    return logger
Example #10
    def __init__(self, names):
        self.filters = [logging.Filter(name) for name in names]
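
A plausible completion of the fragment above (the class name and the filter method are assumptions): with a filter method that accepts a record when any of the wrapped name filters matches, this becomes a whitelist over logger subtrees.

import logging

class Whitelist:  # hypothetical name
    def __init__(self, names):
        self.filters = [logging.Filter(name) for name in names]

    def filter(self, record):
        # Keep the record if at least one whitelisted name matches it.
        return any(f.filter(record) for f in self.filters)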
Example #11
# my own modules

import fine_tune

# Get main logger.
logger = logging.getLogger('fine_tune.eval')
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
    datefmt='%Y/%m/%d %H:%M:%S',
    level=logging.INFO
)

# Filter out messages whose logger name does not begin with 'fine_tune'.
for handler in logging.getLogger().handlers:
    handler.addFilter(logging.Filter('fine_tune'))

if __name__ == '__main__':
    # Parse arguments from STDIN.
    parser = argparse.ArgumentParser()

    # Required parameters.
    parser.add_argument(
        '--experiment',
        help='Name of the previous experiment to evaluate.',
        required=True,
        type=str,
    )
    parser.add_argument(
        '--model',
        help='Name of the model to evaluate.',
Example #12
#!/usr/bin/env python

import argparse
import requests
import urllib.parse
import logging

logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s %(message)s',
                    level=logging.INFO,
                    filename='reaper.log')

LOGGER = logging.getLogger('reaper')

for handler in logging.root.handlers:
    handler.addFilter(logging.Filter('reaper'))


def _get_headers(api_key):
    return {'X-Gu-Media-Key': api_key}


def _perform_get(media_api, api_key, until, length):
    query_params = {'until': until, 'length': length, 'persisted': False}

    headers = _get_headers(api_key)

    url = '{0}/images?{1}'.format(media_api,
                                  urllib.parse.urlencode(query_params))

    # verify=False to forcefully ignore SSL verification in DEV, which will fail due to cert using custom CA.
    return requests.get(url, headers=headers, verify=False).json()
Example #13
    def __init__(self, blacklist):
        self.blacklist = [logging.Filter(name) for name in blacklist]
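
The mirror image of the whitelist sketch after Example #10; a hedged guess at this class's filter method (class name and method body assumed, not from the source):

import logging

class Blacklist:  # hypothetical name
    def __init__(self, blacklist):
        self.blacklist = [logging.Filter(name) for name in blacklist]

    def filter(self, record):
        # Drop the record if any blacklisted name matches it.
        return not any(f.filter(record) for f in self.blacklist)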
Example #14
def get_config(is_local):
    global verbose
    config = {}
    config_path = None
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-s: %(message)s')
    if is_local:
        shortopts = 'hd:s:b:p:k:l:m:O:o:G:g:c:t:vq'
        longopts = [
            'help', 'fast-open', 'pid-file=', 'log-file=', 'user=', 'version'
        ]
    else:
        shortopts = 'hd:s:p:k:m:O:o:G:g:c:t:vq'
        longopts = [
            'help', 'fast-open', 'pid-file=', 'log-file=', 'workers=',
            'forbidden-ip=', 'user=', 'manager-address=', 'version'
        ]
    try:
        optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
        for key, value in optlist:
            if key == '-c':
                config_path = value
            elif key in ('-h', '--help'):
                print_help(is_local)
                sys.exit(0)
            elif key == '--version':
                print_shadowsocks()
                sys.exit(0)
            else:
                continue

        if config_path is None:
            config_path = find_config()

        if config_path:
            logging.debug('loading config from %s' % config_path)
            with open(config_path, 'rb') as f:
                try:
                    config = parse_json_in_str(
                        remove_comment(f.read().decode('utf8')))
                except ValueError as e:
                    logging.error('found an error in config.json: %s', str(e))
                    sys.exit(1)

        v_count = 0
        for key, value in optlist:
            if key == '-p':
                config['server_port'] = int(value)
            elif key == '-k':
                config['password'] = to_bytes(value)
            elif key == '-l':
                config['local_port'] = int(value)
            elif key == '-s':
                config['server'] = to_str(value)
            elif key == '-m':
                config['method'] = to_str(value)
            elif key == '-O':
                config['protocol'] = to_str(value)
            elif key == '-o':
                config['obfs'] = to_str(value)
            elif key == '-G':
                config['protocol_param'] = to_str(value)
            elif key == '-g':
                config['obfs_param'] = to_str(value)
            elif key == '-b':
                config['local_address'] = to_str(value)
            elif key == '-v':
                v_count += 1
                # '-vv' turns on more verbose mode
                config['verbose'] = v_count
            elif key == '-t':
                config['timeout'] = int(value)
            elif key == '--fast-open':
                config['fast_open'] = True
            elif key == '--workers':
                config['workers'] = int(value)
            elif key == '--manager-address':
                config['manager_address'] = value
            elif key == '--user':
                config['user'] = to_str(value)
            elif key == '--forbidden-ip':
                config['forbidden_ip'] = to_str(value)

            elif key == '-d':
                config['daemon'] = to_str(value)
            elif key == '--pid-file':
                config['pid-file'] = to_str(value)
            elif key == '--log-file':
                config['log-file'] = to_str(value)
            elif key == '-q':
                v_count -= 1
                config['verbose'] = v_count
            else:
                continue
    except getopt.GetoptError as e:
        print(e, file=sys.stderr)
        print_help(is_local)
        sys.exit(2)

    if not config:
        logging.error('config not specified')
        print_help(is_local)
        sys.exit(2)

    config['password'] = to_bytes(config.get('password', b''))
    config['method'] = to_str(config.get('method', 'aes-256-cfb'))
    config['protocol'] = to_str(config.get('protocol', 'origin'))
    config['protocol_param'] = to_str(config.get('protocol_param', ''))
    config['obfs'] = to_str(config.get('obfs', 'plain'))
    config['obfs_param'] = to_str(config.get('obfs_param', ''))
    config['port_password'] = config.get('port_password', None)
    config['additional_ports'] = config.get('additional_ports', {})
    config['additional_ports_only'] = config.get('additional_ports_only',
                                                 False)
    config['timeout'] = int(config.get('timeout', 300))
    config['udp_timeout'] = int(config.get('udp_timeout', 120))
    config['udp_cache'] = int(config.get('udp_cache', 64))
    config['fast_open'] = config.get('fast_open', False)
    config['workers'] = config.get('workers', 1)
    config['pid-file'] = config.get('pid-file', '/var/run/shadowsocksr.pid')
    config['log-file'] = config.get('log-file', '/var/log/shadowsocksr.log')
    config['verbose'] = config.get('verbose', False)
    config['connect_verbose_info'] = config.get('connect_verbose_info', 0)
    config['local_address'] = to_str(config.get('local_address', '127.0.0.1'))
    config['local_port'] = config.get('local_port', 1080)
    if is_local:
        if config.get('server', None) is None:
            logging.error('server addr not specified')
            print_local_help()
            sys.exit(2)
        else:
            config['server'] = to_str(config['server'])
    else:
        config['server'] = to_str(config.get('server', '0.0.0.0'))
        config['black_hostname_list'] = to_str(
            config.get('black_hostname_list', '')).split(',')
        if len(config['black_hostname_list']
               ) == 1 and config['black_hostname_list'][0] == '':
            config['black_hostname_list'] = []
        try:
            config['forbidden_ip'] = \
                IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128'))
        except Exception as e:
            logging.error(e)
            sys.exit(2)
        try:
            config['forbidden_port'] = PortRange(
                config.get('forbidden_port', ''))
        except Exception as e:
            logging.error(e)
            sys.exit(2)
        try:
            config['ignore_bind'] = \
                IPNetwork(config.get('ignore_bind', '127.0.0.0/8,::1/128,10.0.0.0/8,192.168.0.0/16'))
        except Exception as e:
            logging.error(e)
            sys.exit(2)
    config['server_port'] = config.get('server_port', 8388)

    logging.getLogger('').handlers = []
    logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE')
    if config['verbose'] >= 2:
        level = VERBOSE_LEVEL
    elif config['verbose'] == 1:
        level = logging.DEBUG
    elif config['verbose'] == -1:
        level = logging.WARN
    elif config['verbose'] <= -2:
        level = logging.ERROR
    else:
        level = logging.INFO
    verbose = config['verbose']
    logging.basicConfig(
        level=level,
        format=
        '[%(levelname)s] %(asctime)s %(filename)s:%(lineno)s(%(funcName)s) %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    root = logging.getLogger()
    filter_obj = logging.Filter()
    filter_obj.filter = lambda x: x.funcName != 'new_server'
    root.addFilter(filter_obj)
    check_config(config, is_local)

    return config
Example #15
    parser.add_argument('--time_period', required=True, help='Status to check')
    parser.add_argument('--prefix', required=True, help='Start date')
    parser.add_argument('--mail',
                        required=True,
                        help='receivers',
                        nargs='+',
                        default=None)
    parser.add_argument('--log_level',
                        help='Logging level.',
                        required=False,
                        default="INFO")

    try:
        args = parser.parse_args()
    except Exception as e:
        print(e)
        sys.exit(1)

    logging.basicConfig(
        stream=sys.stdout,
        format=
        "%(levelname) -5s %(asctime)s %(module)s:%(lineno)s - %(message)s",
        level=logging.getLevelName(args.log_level),
    )
    global logger
    logger = logging.getLogger(__name__)

    for handler in logging.root.handlers:
        handler.addFilter(logging.Filter('__main__'))

    main()
Example #16
        elif isinstance(logger, logging.PlaceHolder):
            cur_colorname = __color('<Logger {}>'.format(loggername), 'white')
        else:
            assert False

        cur_logger = __getHandlers(logger) if include_handlers else {}
        parentlogger[cur_colorname] = cur_logger
        loggers[loggername] = cur_logger

    LA = asciitree.LeftAligned()
    print('\n{}\n'.format(LA(tree)))


if __name__ == '__main__':

    logging.basicConfig()

    logging.getLogger('a.b')
    logging.getLogger('a.b.c.d').addHandler(logging.FileHandler('/tmp/my.log'))
    logging.getLogger('a.b.c.d').handlers[0].addFilter(logging.Filter())

    logging.getLogger('a.b').addHandler(logging.StreamHandler())
    logging.getLogger('a.f')

    import logging.handlers  # More handlers are in here
    logging.getLogger('x.y').addHandler(
        logging.handlers.DatagramHandler('192.168.1.3', 9999))
    logging.root.addFilter(logging.Filter('a.b.c'))

    print_logging_tree(include_handlers=True)
Example #17
    def __init__(self, conffile):
        super(Scanner, self).__init__()

        config = json.load(open(conffile, 'r'))
        # 1. init globalVar.config first
        globalVar.config = config
        pprint(globalVar.config['global'])

        # 2.
        self.server = config['global']['server']
        self.token = config['global']['token']
        # Note: targetname is a key of config itself, not of config['global']
        self.targetname = config['targetname']
        self.target = config['global']['target']
        self.threads = int(
            config['global']['threads']
        ) if config['global']['threads'] != '' else multiprocessing.cpu_count(
        )
        print('self.threads=', self.threads, type(self.threads))
        # print "config['global']['gatherdepth']=",config['global']['gatherdepth']
        self.gatherdepth = int(
            config['global']
            ['gatherdepth']) if config['global']['gatherdepth'] != '' else 0
        # print 'self.gatherdepth=',self.gatherdepth
        self.loglevel = config['global']['loglevel'] if config['global'][
            'loglevel'] != '' else 'INFO'
        self.args = {
            'loglevel': self.loglevel,
            'threads': self.threads,
            'gatherdepth': self.gatherdepth
        }
        self.pluginargs = config['plugins']

        # web interface
        self.web_interface = None
        if self.server and self.token:
            self.web_interface = WebInterface(self.server, self.token)
        # tasks
        self.services = []
        # scan results
        self.result = {}
        # pluginLoaders

        self.pls = []

        # 3. init logging
        self.loghandler = []
        # logging module; make sure it is only assigned once
        if globalVar.mainlogger is None:
            globalVar.mainlogger = logging.getLogger('main')
            if self.loglevel == 'DEBUG':
                globalVar.mainlogger.setLevel(logging.DEBUG)
            else:
                globalVar.mainlogger.setLevel(logging.INFO)

            #	logging handler
            formatter = logging.Formatter(
                '[%(process)d] - [%(levelname)s] - %(message)s')
            # Create a handler that writes to the log file
            filepath = BASEDIR + '/output/log/' + genFilename(
                self.targetname) + '.log'
            if os.path.isfile(filepath):
                os.remove(filepath)
            fh = logging.FileHandler(filepath, 'a')
            # Create another handler that prints to the console
            ch = logging.StreamHandler()

            fi = logging.Filter('main')

            fh.addFilter(fi)
            ch.addFilter(fi)

            fh.setFormatter(formatter)
            ch.setFormatter(formatter)

            self.loghandler.append(ch)
            self.loghandler.append(fh)

            self._initLogging()

        globalVar.mainlogger.info('[*] Start a new scan')
        globalVar.mainlogger.info('\tserver\t=%s' % self.server)
        globalVar.mainlogger.info('\ttoken\t=%s' % self.token)
        globalVar.mainlogger.info('\ttarget\t=%s' % self.target)
        globalVar.mainlogger.info('\tthreads\t=%d' % self.threads)

        # Note: the list cannot be cleared the following way:
        # globalVar.undone_targets = []
        tmpundone = copy.deepcopy(globalVar.undone_targets)
        for each_target in tmpundone:
            globalVar.undone_targets.remove(each_target)
Example #18
    def test_add_filter(self):
        l_filter = logging.Filter()
        self.m_obj.add_filter("f1", l_filter)
        self.m_obj.add_filter("f2", l_filter)
        with self.assertRaises(error.XtdError):
            self.m_obj.add_filter("f2", l_filter)
Example #19
	def __init__(self,server=None,token=None,target=None,pluginfilepath=None,pluginargs=None,threads=None,loglevel='INFO'):
		super(PluginMultiRunner, self).__init__()
		self.server = server
		self.token = token
		self.target = target
		self.pluginfilepath = BASEDIR +'/' +pluginfilepath
		self.pluginargs = pluginargs
		if threads and isinstance(threads, int):
			self.threads = threads
		else:
			self.threads = multiprocessing.cpu_count()
		self.loglevel = loglevel
		self.args = {'loglevel':self.loglevel,'threads':self.threads}

		# web interface
		self.web_interface = None
		if server and token:
			self.web_interface = WebInterface(server,token)
		# tasks
		self.services = []
		# scan results
		self.result = {}
		# pluginLoaders
		
		self.pls = []

		# logging module
		globalVar.mainlogger = logging.getLogger('main')
		if loglevel == 'DEBUG':
			globalVar.mainlogger.setLevel(logging.DEBUG)
		else:
			globalVar.mainlogger.setLevel(logging.INFO)

		# Define the output format (formatter) for the handlers
		# formatter = logging.Formatter('%(asctime)s - %(name)s - [%(levelname)s] - %(message)s')  
		formatter1 = logging.Formatter('[%(process)d] - [%(levelname)s] - %(message)s')  
		formatter2 = logging.Formatter('%(message)s')  
		# Create a handler that writes to the log file
		filepath = BASEDIR+'/output/scan.log'
		if os.path.isfile(filepath):
			os.remove(filepath)
		fh = logging.FileHandler(filepath,'a')    
		# Create another handler that prints to the console
		ch = logging.StreamHandler()  

		fi = logging.Filter('main')

		fh.addFilter(fi)
		ch.addFilter(fi)

		fh.setFormatter(formatter1)
		ch.setFormatter(formatter1)

		globalVar.mainlogger.addHandler(fh)
		globalVar.mainlogger.addHandler(ch)

		globalVar.mainlogger.info('[*] Start a new scan')
		globalVar.mainlogger.info('\tserver  =%s' % server)
		globalVar.mainlogger.info('\ttoken   =%s' % token)
		globalVar.mainlogger.info('\ttarget  =%s' % target)
		globalVar.mainlogger.info('\tthreads =%d' % self.threads)
Example #20
File: _objects.py Project: kernc/dill
a['TZInfoType'] = datetime.tzinfo()
a['DateTimeType'] = datetime.datetime.today()
a['CalendarType'] = calendar.Calendar()
if not PY3:
    a['SetsType'] = sets.Set()
    a['ImmutableSetType'] = sets.ImmutableSet()
    a['MutexType'] = mutex.mutex()
# numeric and mathematical types (CH 9)
a['DecimalType'] = decimal.Decimal(1)
a['CountType'] = itertools.count(0)
# data compression and archiving (CH 12)
a['TarInfoType'] = tarfile.TarInfo()
# generic operating system services (CH 15)
a['LoggerType'] = logging.getLogger()
a['FormatterType'] = logging.Formatter()  # pickle ok
a['FilterType'] = logging.Filter()  # pickle ok
a['LogRecordType'] = logging.makeLogRecord(_dict)  # pickle ok
a['OptionParserType'] = _oparser = optparse.OptionParser()  # pickle ok
a['OptionGroupType'] = optparse.OptionGroup(_oparser, "foo")  # pickle ok
a['OptionType'] = optparse.Option('--foo')  # pickle ok
if HAS_CTYPES:
    a['CCharType'] = _cchar = ctypes.c_char()
    a['CWCharType'] = ctypes.c_wchar()  # fail == 2.6
    a['CByteType'] = ctypes.c_byte()
    a['CUByteType'] = ctypes.c_ubyte()
    a['CShortType'] = ctypes.c_short()
    a['CUShortType'] = ctypes.c_ushort()
    a['CIntType'] = ctypes.c_int()
    a['CUIntType'] = ctypes.c_uint()
    a['CLongType'] = ctypes.c_long()
    a['CULongType'] = ctypes.c_ulong()
Example #21
    def __init__(self, *whitelist):
        self.whitelist = [_logging.Filter(name) for name in whitelist]
Example #22
import logging

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)

    log_filter = logging.Filter('A')
    # Attach the filter to the handler installed by basicConfig(), so only
    # records from logger 'A' and its children ('A.B', ...) pass through.
    logging.getLogger().handlers[0].addFilter(log_filter)

    logger1 = logging.getLogger('A')
    logger1.debug('This is written to log output')

    logger2 = logging.getLogger('A.B')
    logger2.debug('This is written to log output')

    logger3 = logging.getLogger('B')
    logger3.debug(
        'This is NOT written to log output, because only names under "A" '
        'are allowed by the filter'
    )
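
    # Expected console output (basicConfig's default '%(levelname)s:%(name)s:%(message)s' format):
    #   DEBUG:A:This is written to log output
    #   DEBUG:A.B:This is written to log output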
Example #23
File: test.py Project: sarahwertz/sepal
from sepal.tests.test_timeseries import test_timeseries

import logging

logging.basicConfig(level=logging.DEBUG)
for handler in logging.root.handlers:
    handler.addFilter(logging.Filter('sepal'))
test_timeseries()
Example #24
def main():
    # Some User Agents
    hds = [{
        'User-Agent':
        'Mozilla/5.0 (Windows; U; Windows NT 6 .1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'
    }, {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safa'
        'ri/535.11'
    }, {
        'User-Agent':
        'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)'
    }, {
        'User-Agent':
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0'
    }, {
        'User-Agent':
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/44.0'
        '.2403.89 Chrome/44.0.2403.89 Safari/537.36'
    }, {
        'User-Agent':
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Ge'
        'cko) Version/5.1 Safari/534.50'
    }, {
        'User-Agent':
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Vers'
        'ion/5.1 Safari/534.50'
    }, {
        'User-Agent':
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0'
    }, {
        'User-Agent':
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'
    }, {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'
    }, {
        'User-Agent':
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrom'
        'e/17.0.963.56 Safari/535.11'
    }, {
        'User-Agent':
        'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11'
    }, {
        'User-Agent':
        'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11'
    }]
    parser = args_parser(
        sys.argv[0].rpartition('/')[2],
        description=
        'These are the options for the web spider\nAuthor: Tangxing Zhou')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    m_start_url = args.start_url
    m_database = args.data_base
    m_table = args.table
    m_number_of_threads = args.number_of_threads
    m_xpath = args.xpath
    m_xpath_caller = args.xpath_caller
    if m_xpath_caller is None:
        m_xpath_caller = ['DefaultContentsDispatcher'] * len(m_xpath)
    m_page_caller = args.page_caller
    m_item = args.item
    if args.filter != '':
        m_filter = logging.Filter(args.filter)
        for filter_instance in [
                debug_logger, info_logger, warn_logger, error_logger,
                critical_logger
        ]:
            filter_instance.addFilter(m_filter)
    m_xpath_and_caller = {}
    if len(m_xpath_caller) != len(m_xpath) or len(m_item) != len(m_xpath):
        error_logger.error(
            'The xpaths: {} callers: {} items: {} are not corresponding'.
            format(m_xpath, m_xpath_caller, m_item))
        raise ValueError(
            'The xpaths: {} callers: {} items: {} are not corresponding'.
            format(m_xpath, m_xpath_caller, m_item))
    else:
        for index, xpath in enumerate(m_xpath):
            try:
                xpath_caller_instance = eval('{}()'.format(
                    m_xpath_caller[index]))
                m_xpath_and_caller[xpath] = xpath_caller_instance
            except Exception as e:
                error_logger.error(
                    'Failed to create the xpath caller instance of {} for xpath: {} {}'
                    .format(m_xpath_caller[index], m_xpath, e))
    m_db = SqlDB(m_database)
    m_db.create_table(m_table, tuple(), None, *tuple(m_item))
    web_caller = None
    try:
        web_caller = eval('{}({}, "{}", {})'.format(m_page_caller, 'm_db',
                                                    m_table, m_item))
    except Exception as e:
        error_logger.error(
            'Failed to create the web caller instance of {} {}'.format(
                m_page_caller, e))
    m_web_spider = WebSpider(m_start_url,
                             url_generator,
                             m_xpath_and_caller,
                             web_caller,
                             m_number_of_threads,
                             header=hds[random.randint(0,
                                                       len(hds) - 1)])
    m_web_spider.start_threads()
    m_web_spider.wait_threads()
    print(m_db.fetchall('select * from {}'.format(m_table))[0][0])
Example #25
def main():
    """Run the application."""
    # We don't want any warnings to end up impacting output.
    warnings.simplefilter('ignore')

    if DEBUG:
        pid = os.getpid()
        log_filename = 'rbssh-%s.log' % pid

        if DEBUG_LOGDIR:
            log_path = os.path.join(DEBUG_LOGDIR, log_filename)
        else:
            log_path = log_filename

        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s %(name)-18s %(levelname)-8s '
                                   '%(message)s',
                            datefmt='%m-%d %H:%M',
                            filename=log_path,
                            filemode='w')

        debug('%s', sys.argv)
        debug('PID %s', pid)

    # Perform the bare minimum to initialize the Django/Review Board
    # environment. We're not calling Review Board's initialize() because
    # we want to completely minimize what we import and set up.
    if hasattr(django, 'setup'):
        django.setup()

    from reviewboard.scmtools.core import SCMTool
    from reviewboard.ssh.client import SSHClient

    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    ch.setFormatter(logging.Formatter('%(message)s'))
    ch.addFilter(logging.Filter('root'))
    logging.getLogger('').addHandler(ch)

    path, port, command = parse_options(sys.argv[1:])

    if '://' not in path:
        path = 'ssh://' + path

    username, hostname = SCMTool.get_auth_from_uri(path, options.username)

    if username is None:
        username = getpass.getuser()

    client = SSHClient(namespace=options.local_site_name,
                       storage_backend=STORAGE_BACKEND)
    client.set_missing_host_key_policy(paramiko.WarningPolicy())

    if command:
        purpose = command
    else:
        purpose = 'interactive shell'

    debug('!!! SSH backend = %s', type(client.storage))
    debug('!!! Preparing to connect to %s@%s for %s',
          username, hostname, purpose)

    attempts = 0
    password = None

    key = client.get_user_key()

    while True:
        try:
            client.connect(hostname, port, username=username,
                           password=password, pkey=key,
                           allow_agent=options.allow_agent)
            break
        except paramiko.AuthenticationException as e:
            if attempts == 3 or not sys.stdin.isatty():
                logging.error('Too many authentication failures for %s' %
                              username)
                sys.exit(1)

            attempts += 1
            password = getpass.getpass("%s@%s's password: " %
                                       (username, hostname))
        except paramiko.SSHException as e:
            logging.error('Error connecting to server: %s' % e)
            sys.exit(1)
        except Exception as e:
            logging.error('Unknown exception during connect: %s (%s)' %
                          (e, type(e)))
            sys.exit(1)

    transport = client.get_transport()
    channel = transport.open_session()

    if sys.platform in ('cygwin', 'win32'):
        debug('!!! Using WindowsHandler')
        handler = WindowsHandler(channel)
    else:
        debug('!!! Using PosixHandler')
        handler = PosixHandler(channel)

    if options.subsystem == 'sftp':
        debug('!!! Invoking sftp subsystem')
        channel.invoke_subsystem('sftp')
        handler.transfer()
    elif command:
        debug('!!! Sending command %s', command)
        channel.exec_command(' '.join(command))
        handler.transfer()
    else:
        debug('!!! Opening shell')
        channel.get_pty()
        channel.invoke_shell()
        handler.shell()

    debug('!!! Done')
    status = channel.recv_exit_status()
    client.close()

    return status
Example #26
def create_app(config=None):
    app = Flask(__name__)

    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(name)s:%(levelname)s: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S %Z')
    logging.Formatter.converter = time.gmtime

    log = logging.getLogger('extractor_run')
    log.addFilter(logging.Filter('extractor_run'))

    configure(app.config, config)

    log.setLevel(logging.getLevelName(app.config['LOG_LEVEL']))

    gdal_version = GdalExtractor.gdal_config(app.config)
    if gdal_version is None:
        log.error('Unable to load GDAL')
        return None

    log.info('Found GDAL Version %s' % gdal_version)

    blueprint = Blueprint('api', __name__, url_prefix='/api')
    api = Api(blueprint, doc='/swagger/')
    app.register_blueprint(blueprint)
    app.config['SWAGGER_UI_JSONEDITOR'] = True

    extractor_api = Namespace('extractor')
    api.add_namespace(extractor_api)

    @extractor_api.route('/health')
    class HealthCheckService(Resource):
        def get(self):
            """
            Health Check
            This endpoint will return the Health of the Service.
            ---
            """
            return make_response(
                jsonify({'Status': 'Oh I, oh, I\'m still alive!'}), 200)

    parser = api.parser()
    parser.add_argument('file_uri', help='fileuri', location='args')
    parser.add_argument('fingerprint', help='fingerprint', location='args')
    parser.add_argument('version', help='version', location='args')

    @extractor_api.route('/extract')
    class ExtractAPIService(Resource):
        # decorators = [jsonp, jwt_user, event_logging]
        @api.expect(parser)
        def get(self):
            """
            Geospatial Extent Extraction (and additional metadata)
            This endpoint extracts the metadata associated with a given fileUri and fingerprint.
            ---
            """
            fileuri = request.args.get('file_uri')
            fingerprint = request.args.get('fingerprint')
            version = request.args.get('version')
            log.info("file_uri %s - fingerprint: %s - version: %s", fileuri,
                     fingerprint, version)

            if fileuri is None:
                log.warning('Missing required request parameter file_uri')
                return make_response(
                    jsonify({'Bad Request': 'file_uri was not provided'}), 400)
            if fingerprint is None:
                return make_response(
                    jsonify({'Bad Request': 'fingerprint was not provided'}),
                    400)

            extractor = GdalExtractor.factory(fileuri, log)

            resp = extractor.extract()
            log.error(resp)
            if resp is None:
                log.error('oh no')
                return make_response(
                    jsonify({'Unsupported Object-type': 'No Metadata Found'}),
                    204)
            else:
                log.error(jsonify(resp))
                return make_response(jsonify(resp))

    @app.route('/extractor')
    def base():
        return "This is the root for the conditioning service!"

    @app.route('/extractor/batch')
    def proc_batch():
        return "This a stub for batch processing!"

    @app.after_request
    def apply_caching(response):
        response.headers["server"] = "ipf.extractor"
        # response.headers["X-Message Of The Day"] = (
        #     "Every normal man must be tempted, at times, to spit on his hands, "
        #     "hoist the black flag, and begin slitting throats")
        response.headers["X-PoweredBy"] = "A Bunch of Fools"
        return response

    @app.errorhandler(404)
    def not_found(error):
        return make_response(jsonify({'error': 'Not found'}), 404)

    return app
Example #27
import logging
import sys

_logger = logging.getLogger('txclib')
_logger.setLevel(logging.INFO)

_formatter = logging.Formatter('%(message)s')

_error_handler = logging.StreamHandler(sys.stderr)
_error_handler.setLevel(logging.ERROR)
_error_handler.setFormatter(_formatter)
_logger.addHandler(_error_handler)

_msg_handler = logging.StreamHandler(sys.stdout)
_msg_handler.setLevel(logging.DEBUG)
_msg_handler.setFormatter(_formatter)
_msg_filter = logging.Filter()
_msg_filter.filter = lambda r: r.levelno < logging.ERROR
_msg_handler.addFilter(_msg_filter)
_logger.addHandler(_msg_handler)

logger = _logger


def set_log_level(level):
    """Set the level for the logger.

    Args:
        level: A string among DEBUG, INFO, WARNING, ERROR, CRITICAL.
    """
    logger.setLevel(getattr(logging, level))
Example #28
import logging
import employee

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')

file_handler = logging.FileHandler('sample.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)

# myFilter
myFilter = logging.Filter('__main__')
# myFilter = logging.Filter('MyNewLogger')
file_handler.addFilter(myFilter)

stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)

logger.addHandler(file_handler)
logger.addHandler(stream_handler)

#myNewLogger
myNewLogger = logging.getLogger("MyNewLogger")
myNewLogger.setLevel(logging.DEBUG)

myNewLogger.addHandler(file_handler)
myNewLogger.addHandler(stream_handler)

def add(x, y):
Example #29
def server(instance='.'):
    """run an instance using Python's builtin HTTP server"""

    parser = ArgumentParser(description='run a built-in zoom http server',
                            usage='zoom server [options] instance')

    parser.add_argument("-p",
                        "--port",
                        type=int,
                        default=80,
                        help='http service port')
    parser.add_argument("-n",
                        "--noop",
                        action='store_true',
                        help='use special debugging middleware stack')
    parser.add_argument("-v",
                        "--verbose",
                        action='store_true',
                        help='verbose console logging')
    parser.add_argument("-f",
                        "--filter",
                        type=str,
                        default=None,
                        help='log filter')
    parser.add_argument('instance', nargs='?', default=None)
    args = parser.parse_args()

    from zoom.server import run as runweb
    import zoom.middleware
    try:
        if args.instance and not os.path.exists(args.instance):
            print('{!r} is not a valid directory'.format(args.instance))
        else:
            # instance = os.path.abspath(args.instance or instance)
            instance = args.instance
            fmt = '%(asctime)s  %(name)-15s %(levelname)-8s %(message)s'
            con_formatter = logging.Formatter(fmt)
            console_handler = logging.StreamHandler(sys.stdout)
            console_handler.setLevel(logging.WARNING)
            console_handler.setFormatter(con_formatter)

            root_logger = logging.getLogger()
            root_logger.setLevel(logging.DEBUG)
            root_logger.addHandler(console_handler)

            if args.filter:
                console_handler.addFilter(logging.Filter(name=args.filter))

            if args.verbose:
                console_handler.setLevel(logging.DEBUG)

            if args.noop:
                handlers = zoom.middleware.DEBUGGING_HANDLERS
                runweb(port=args.port, instance=instance, handlers=handlers)
            else:
                runweb(port=args.port, instance=instance)
            print('\rstopped')

    except PermissionError:
        print('Permission Error: is port {} in use?\n'
              'use -p <port> to choose a different port'.format(args.port))
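
For reference, the argument definitions above imply invocations along these lines (the instance path and filter name are hypothetical):

    $ zoom server -p 8080 -v -f zoom ./myinstance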
Example #30
import argparse
import logging
import os.path

import botocore
from opinel.utils.console import configPrintException

import aws_inventory.config
import aws_inventory.blacklist
import aws_inventory.invoker


# Create a module logger and ignore messages from outside the module; botocore was spewing messages.
logging.basicConfig()
LOGGER = logging.getLogger(aws_inventory.__name__)
LOGGER.addFilter(logging.Filter(aws_inventory.__name__))

def setup_logging(verbose):
    LOGGER.setLevel(logging.DEBUG if verbose else logging.INFO)

def parse_args(args=None):
    parser = argparse.ArgumentParser(
        description='Discover resources in an AWS account.'
    )

    parser.add_argument('--profile',
                        default='default',
                        help='Name of the profile (default: %(default)s)')

    parser.add_argument('--mfa-serial',
                        help='serial number of MFA device')