Example #1
# Assumes: import logging, sys; ColoredFormatter is an external colored log
# formatter (e.g. colorlog.ColoredFormatter).
def init(config=None):
    config = config or {}
    # Default Config
    # Levels = NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL
    default = {
        'console': {
            'enabled': True,
            'level': 'INFO',
            'colors': True,
            'format': '%(message)s'
        },
        'file': {
            'enabled': False,
            'level': 'INFO',
            'file': '/tmp/example.log',
            'format': '%(asctime)s.%(msecs)03d | %(levelname)-8s | %(name)s | %(message)s'
        }
    }

    # Merge default and user defined config
    config = {**default, **config}
    if 'console' in config.keys():
        config['console'] = {**default['console'], **config['console']}
    if 'file' in config.keys():
        config['file'] = {**default['file'], **config['file']}

    # New Logger
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)  # from .env

    # New Console Handler
    if config['console']['enabled']:
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(config['console']['level'])
        if config['console']['colors']:
            ch.setFormatter(ColoredFormatter(config['console']['format']))
        else:
            ch.setFormatter(
                logging.Formatter(fmt=config['console']['format'],
                                  datefmt='%Y-%m-%d %H:%M:%S'))
        logger.addHandler(ch)

    # New File Handler
    if config['file']['enabled']:
        fh = logging.FileHandler(filename=config['file']['file'], mode='a')
        fh.setLevel(config['file']['level'])
        fh.setFormatter(
            logging.Formatter(fmt=config['file']['format'],
                              datefmt='%Y-%m-%d %H:%M:%S'))
        logger.addHandler(fh)
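
A quick usage sketch (hypothetical, assuming init() from the snippet above is in scope): because of the two dict merges, overriding a single key keeps the remaining defaults intact.

import logging

init({'file': {'enabled': True}})  # 'level', 'file', 'format' keep their defaults
logging.getLogger(__name__).info('hello')  # goes to the console and /tmp/example.log
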
Example #2
def read_config(config_file_path):
    with open(config_file_path, 'r') as ymlfile:
        config = yaml.safe_load(ymlfile)

    # some validation of config
    if 'zadara_cloud_console' not in config.keys() or 'url' not in config['zadara_cloud_console'].keys():
        logger.critical('missing zadara CLOUD CONSOLE URL config')
        exit(1)
    if 'zadara_vpsa' not in config.keys() or 'volume_export_path' not in config['zadara_vpsa'].keys():
        logger.critical('missing zadara volume EXPORT PATH config')
        exit(1)
    if 'logging' not in config.keys():
        config['logging'] = None
    return config
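
For reference, a config that passes this validation would parse (via yaml.safe_load) to something like the following; the values are hypothetical.

config = {
    'zadara_cloud_console': {'url': 'https://console.example.com'},
    'zadara_vpsa': {'volume_export_path': '/export/volume1'},
}
# 'logging' is optional; read_config fills it in with None when absent
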
Example #3
async def create_challenge_solver(config):
    challenge_solver_name = list(config.keys())[0]

    if challenge_solver_name not in (solver_names := challenge_solvers.keys()):
        raise click.UsageError(
            f"The challenge solver plugin {challenge_solver_name} does not exist. Valid options: "
            f"{', '.join([solver for solver in solver_names])}.")
Example #4
def read_config(config_file_path):
    with open(config_file_path, 'r') as ymlfile:
        config = yaml.safe_load(ymlfile)

    # some validation of config
    if 'zadara_cloud_console' not in config.keys() or 'url' not in config['zadara_cloud_console'].keys():
        logger.critical('missing zadara CLOUD CONSOLE URL config')
        exit(1)
    if 'zadara_vpsa' not in config.keys() or 'volume_export_path' not in config['zadara_vpsa'].keys():
        logger.critical('missing zadara volume EXPORT PATH config')
        exit(1)
    if 'logging' not in config.keys():
        config['logging'] = None
    return config
Example #5
File: duke.py Project: ch-k/trollduction
    def reload_config(self):
        logger.debug("Reloading config from " + str(self.config_file))
        config = read_config(self.config_file)
        for key, val in config.items():
            # checking for new options in proc
            if key in self.procs_config:
                identical = True
                for key_item, val_item in config[key].items():
                    if (key_item not in self.procs_config[key] or
                            self.procs_config[key][key_item] != val_item):
                        identical = False
                        break
                # checking for deletion of options in proc
                for key_item in self.procs_config[key]:
                    if key_item not in config[key]:
                        identical = False
                        break
                if not identical:
                    # FIXME: a reload might suffice…
                    self.kill(key)
                    pid = self.spawn(key, val)
                    if pid:
                        logger.debug("Updated %s as pid %d", str(key), pid)
                    else:
                        logger.error("Couldn't update " + str(key))
            else:
                pid = self.spawn(key, val)
                if pid:
                    logger.debug("Added %s as pid %d", str(key), pid)

        for key in set(self.procs_config.keys()) - set(config.keys()):
            self.kill(key)
            del self.procs_config[key]
            logger.debug("Removed " + str(key))
        self.procs_config = config
Example #6
    def reload_config(self):
        logger.debug("Reloading config from " + str(self.config_file))
        config = read_config(self.config_file)
        for key, val in config.items():
            # checking for new options in proc
            if key in self.procs_config:
                identical = True
                for key_item, val_item in config[key].items():
                    if (key_item not in self.procs_config[key] or
                            self.procs_config[key][key_item] != val_item):
                        identical = False
                        break
                # checking for deletion of options in proc
                for key_item in self.procs_config[key]:
                    if key_item not in config[key]:
                        identical = False
                        break
                if not identical:
                    # FIXME: a reload might suffice…
                    self.kill(key)
                    pid = self.spawn(key, val)
                    if pid:
                        logger.debug("Updated %s as pid %d", str(key), pid)
                    else:
                        logger.error("Couldn't update " + str(key))
            else:
                pid = self.spawn(key, val)
                if pid:
                    logger.debug("Added %s as pid %d", str(key), pid)

        for key in set(self.procs_config.keys()) - set(config.keys()):
            self.kill(key)
            del self.procs_config[key]
            logger.debug("Removed " + str(key))
        self.procs_config = config
Example #7
def watch(conf):
    '''Set up file watchers'''
    from . import watcher

    events = {
        'on_modified', 'on_created', 'on_deleted', 'on_moved', 'on_any_event'
    }
    for name, config in conf.items():
        _key = cache_key('watch', config)
        if _key in _cache:
            watcher.watch(name, **_cache[_key])
            continue
        if 'paths' not in config:
            app_log.error('watch:%s has no "paths"', name)
            continue
        if not set(config.keys()) & events:
            app_log.error('watch:%s has no events (on_modified, ...)', name)
            continue
        if not isinstance(config['paths'], (list, set, tuple)):
            config['paths'] = [config['paths']]
        for event in events:
            if event in config:
                if not callable(config[event]):
                    config[event] = locate(config[event], modules=['gramex.transforms'])
                    if not callable(config[event]):
                        app_log.error('watch:%s.%s is not callable', name, event)
                        config[event] = lambda event: None
        _cache[_key] = config
        watcher.watch(name, **_cache[_key])
Example #8
File: main.py Project: tinyboyz/checkin
    def __init__(self, pidfile, cfgfile):
        Daemon.__init__(self, pidfile)
        self.jobs = {}
        self.immediately = False
        self.scheduler = Scheduler(daemonic=False)
        self.logger = logging.getLogger(self.__class__.__name__)
        if os.path.exists(cfgfile):
            with open(cfgfile, 'rt') as f:
                config = yaml.safe_load(f.read())
            for k1 in config.keys():
                if k1 == 'version':
                    pass
                elif k1 == 'immediately':
                    self.immediately = config[k1]
                elif k1 == 'taobao':
                    self.jobs[k1] = config[k1]
                    self.jobs[k1]['id'] = None
                    if 'chktime' in self.jobs[k1].keys():
                        self.jobs[k1]['btime'] = time.strptime(self.jobs[k1]['chktime'].split('-')[0], '%H:%M')
                        self.jobs[k1]['etime'] = time.strptime(self.jobs[k1]['chktime'].split('-')[1], '%H:%M')
                        if self.jobs[k1]['btime'] >= self.jobs[k1]['etime']:
                            raise ValueError('"chktime" is invalid')
                    else:
                        raise ValueError('No "chktime" found in the configuration.')
                else:
                    pass
        else:
            self.logger.error('{0} not found'.format(cfgfile))
Example #9
File: voltron.py Project: joeabbey/voltron
def main(debugger=None, dict=None):
    global log, queue, inst, config

    # Configure logging
    logging.config.dictConfig(LOG_CONFIG)
    log = logging.getLogger('')

    # Load config
    config_data = open(os.path.expanduser('~/.voltron')).read()
    lines = filter(lambda x: len(x) != 0 and x[0] != '#', config_data.split('\n'))
    config = json.loads('\n'.join(lines))

    # Set up queue
    queue = Queue.Queue()

    if in_gdb:
        # Load GDB command
        log.debug('Loading GDB command')
        print("Voltron loaded.")
        inst = VoltronGDBCommand()
    elif in_lldb:
        # Load LLDB command
        log.debug('Loading LLDB command')
        inst = VoltronLLDBCommand(debugger, dict)
    else:
        # Set up command line arg parser
        parser = argparse.ArgumentParser()
        parser.add_argument('--debug', '-d', action='store_true', help='print debug logging')
        subparsers = parser.add_subparsers(title='subcommands', description='valid subcommands', help='additional help')

        # Update the view base class
        base = CursesView if 'curses' in config.keys() and config['curses'] else TerminalView
        for cls in TerminalView.__subclasses__():
            cls.__bases__ = (base,)

        # Set up a subcommand for each view class 
        for cls in base.__subclasses__():
            cls.configure_subparser(subparsers)

        # And subcommands for the loathsome red-headed stepchildren
        StandaloneServer.configure_subparser(subparsers)
        GDB6Proxy.configure_subparser(subparsers)

        # Parse args
        args = parser.parse_args()
        if args.debug:
            log.setLevel(logging.DEBUG)

        # Instantiate and run the appropriate module
        inst = args.func(args)
        try:
            inst.run()
        except Exception as e:
            log.error("Exception running module {}: {}".format(inst.__class__.__name__, str(e)))
        except KeyboardInterrupt:
            pass
        inst.cleanup()
        log.info('Exiting')
Example #10
    def set_server(self, ip, port):
        config = self._config
        if 'SERVER' not in config.keys():
            config['SERVER'] = {}
        config['SERVER']['ip'] = ip
        config['SERVER']['port'] = str(port)

        with open(self.filename, 'w') as f:
            config.write(f)
Example #11
    def set_mrouter(self, id, ip, port):
        config = self._config
        if 'MROUTER' not in config.keys():
            config['MROUTER'] = {}
        config['MROUTER']['id'] = id
        config['MROUTER']['ip'] = ip
        config['MROUTER']['port'] = str(port)

        with open(self.filename, 'w') as f:
            config.write(f)
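
The two setters above follow the same pattern; a minimal self-contained sketch (a configparser backing store is assumed, and the filename is hypothetical):

import configparser

config = configparser.ConfigParser()
if 'SERVER' not in config.keys():
    config['SERVER'] = {}
config['SERVER']['ip'] = '127.0.0.1'
config['SERVER']['port'] = str(8080)  # configparser values must be strings

with open('device.ini', 'w') as f:
    config.write(f)
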
Example #12
def run(config_file, bootstrap_port, path):
    """Starts the app as defined in the config file.

    Starts the app in bootstrap mode if the bootstrap port is set via --bootstrap-port."""
    config = load_config(config_file)

    loop = asyncio.get_event_loop()

    app_config_name = list(config.keys())[0]

    try:
        app_class = server_app_registry.get_plugin(app_config_name)
    except ValueError as e:
        raise click.UsageError(*e.args)

    if bootstrap_port:
        if app_class is AcmeCA:
            raise click.UsageError(
                f"Bootstrapping is not supported for the {app_class} at this moment."
            )

        click.echo(
            f"Starting {app_class.__name__} in bootstrap mode on port {bootstrap_port}"
        )
        app_config = config[app_config_name]

        app_config["port"] = bootstrap_port
        app_config["challenge_validator"] = "dummy"  # Do not validate challenges
        app_config["subnets"] = [
            "127.0.0.1/32",
            "10.110.0.0/24",
        ]  # Only allow localhost and the docker bridge network
        # Bootstrap app does not run behind a reverse proxy:
        app_config["use_forwarded_header"] = False
        app_config["require_eab"] = False
    else:
        click.echo(f"Starting {app_class.__name__}")

    if issubclass(app_class, AcmeRelayBase):
        runner, site = loop.run_until_complete(
            run_relay(config, path, app_class, app_config_name))
    elif app_class is AcmeCA:
        runner, site = loop.run_until_complete(run_ca(config, path))
    else:
        raise ValueError(app_class)

    aiohttp_jinja2.setup(site.app, loader=jinja2.FileSystemLoader("./tpl/"))
    aiohttp_jinja2.get_env(site.app).globals.update({"url_for": _url_for})

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        loop.run_until_complete(runner.cleanup())
Example #13
async def create_challenge_solver(config):
    challenge_solver_name = list(config.keys())[0]

    try:
        challenge_solver_class = challenge_solver_registry.get_plugin(
            challenge_solver_name)
    except ValueError as e:
        raise click.UsageError(*e.args)

    if type(kwargs := config[challenge_solver_name]) is not dict:
        kwargs = {}
Example #14
def update_conf(config, newconfpath):
    """
    Update config dict with new config file
    :param config:
    :param newconfpath:
    :return:
    """
    newconfig = conf_load(newconfpath)
    for section in config.keys():
        if section in newconfig:
            config[section].update(newconfig[section])
    return newconfig
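
A small worked example of the merge semantics (hypothetical data): sections present in both dicts are updated in place, sections only in the new config are ignored, and the function returns the new config rather than the merged one.

config = {'main': {'a': 1, 'b': 2}, 'extra': {'x': 9}}
newconfig = {'main': {'b': 3}, 'other': {'y': 0}}

for section in config.keys():
    if section in newconfig:
        config[section].update(newconfig[section])

print(config)     # {'main': {'a': 1, 'b': 3}, 'extra': {'x': 9}}
print(newconfig)  # unchanged; this is what update_conf returns
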
Example #15
def run(config_file, bootstrap_port, path):
    """Starts the app as defined in the config file.

    Starts the app in bootstrap mode if the bootstrap port is set via --bootstrap-port."""
    config = load_config(config_file)

    app_config_name = list(config.keys())[0]

    if app_config_name not in (app_names := server_apps.keys()):
        raise click.UsageError(
            f"Cannot run app '{app_config_name}'. Valid options: "
            f"{', '.join([app for app in app_names])}. "
            f"Please check your config file '{config_file}' and rename the main section accordingly."
        )
Example #16
def main(debugger=None, dict=None):
    global log, queue, inst

    # Load config
    config = {}
    try:
        config_data = open(os.path.expanduser('~/.voltron')).read()
        lines = filter(lambda x: len(x) != 0 and x[0] != '#', config_data.split('\n'))
        config = json.loads('\n'.join(lines))
    except Exception:
        log.debug("No config file")

    # Set up command line arg parser
    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', '-d', action='store_true', help='print debug logging')
    top_level_sp = parser.add_subparsers(title='subcommands', description='valid subcommands')
    view_parser = top_level_sp.add_parser('view', help='display a view')
    view_sp = view_parser.add_subparsers(title='views', description='valid view types', help='additional help')

    # Update the view base class
    base = CursesView if 'curses' in config.keys() and config['curses'] else TerminalView
    for cls in TerminalView.__subclasses__():
        cls.__bases__ = (base,)

    # Set up a subcommand for each view class 
    for cls in base.__subclasses__():
        cls.configure_subparser(view_sp)

    # And subcommands for the loathsome red-headed stepchildren
    StandaloneServer.configure_subparser(top_level_sp)
    GDB6Proxy.configure_subparser(top_level_sp)

    # Parse args
    args = parser.parse_args()
    if args.debug:
        log.setLevel(logging.DEBUG)

    # Instantiate and run the appropriate module
    inst = args.func(args, loaded_config=config)
    try:
        inst.run()
    except Exception as e:
        log.error("Exception running module {}: {}".format(inst.__class__.__name__, str(e)))
    except KeyboardInterrupt:
        pass
    inst.cleanup()
    log.info('Exiting')
Example #17
def check_config(config):
    """
    Lightly check the configuration and issue warnings as appropriate.
    Args:
        config: The loaded configuration.
    """
    UNDERSTOOD_CONFIG_FIELDS = CONFIG_DEFAULTS.keys()

    def check_section(section_name, known_keys, cfgpart=config):
        nonunderstood = set(
            cfgpart[section_name].keys()).difference(known_keys)
        if len(nonunderstood) > 0:
            logger.warning(
                f"The following configuration fields in '{section_name}' "
                f"are not understood: %s",
                nonunderstood,
            )

    nonunderstood = set(config.keys()).difference(UNDERSTOOD_CONFIG_FIELDS)
    if len(nonunderstood) > 0:
        logger.warning(
            "The following configuration sections are not understood: %s",
            nonunderstood)

    check_section("http", {"port", "bind_addresses"})
    check_section("log", {"setup", "access"})
    check_section("access", {"file", "enabled", "x_forwarded_for"},
                  cfgpart=config["log"])
    check_section("metrics", {"opentracing", "sentry", "prometheus"})
    check_section(
        "opentracing",
        {"enabled", "implementation", "jaeger", "service_name"},
        cfgpart=config["metrics"],
    )
    check_section("prometheus", {"enabled", "address", "port"},
                  cfgpart=config["metrics"])
    check_section("sentry", {"enabled", "dsn"}, cfgpart=config["metrics"])

    # If 'db' is defined, it will override the 'database' config.
    if "db" in config:
        logger.warning(
            """The 'db' config field has been replaced by 'database'.
See the sample config for help.""")
    else:
        check_section("database", {"name", "args"})
Example #18
def update_db_config(config):
    """
        Update given configuration with registered service configuration

        :return: When this function is called the service is already registered
                 and passed configuration values are compared with database
                 settings that will have highest priority. Returned value
                 will be the setting extracted from the DB if enabled.
    """
    # Retrieve the service UUID
    fgapisrv_uuid = srv_uuid()
    db_config = fgapisrv_db.srv_config(fgapisrv_uuid)
    for key in config.keys():
        if config[key] != db_config[key]:
            logging.debug("DB configuration overload: conf(%s)='%s'<-'%s'" %
                          (key, config[key], db_config[key]))
            config[key] = db_config[key]
    return config
Example #19
def reconcileINI(args):
    whitelists = {
        "conn_dest_whitelist_ips": {},
        "conn_dest_whitelist_ports": {},
        "smtp_whitelist_source": {},
        "smtp_whitelist_destination": {},
        "conn_src_whitelist_ips": {}
    }

    homedir = os.path.split(sys.argv[0])[0]
    if not args.inifile:
        args.inifile = os.path.join(homedir, "brocess.ini")
    config = configparser.ConfigParser()
    config.read(args.inifile)
    args.logformatline = config.get(
        "main",
        "logformat",
        fallback="[%(asctime)s] [%(filename)s:%(lineno)d] "
        "[%(threadName)s] [%(process)d] [%(levelname)s] - %(message)s")

    if not config:
        return args, whitelists
    if not args.dbtype:
        args.dbtype = config.get("main", "dbtype", fallback=None)
    if not args.database:
        args.database = config.get(args.dbtype, "database", fallback=None)
    if not args.eventlog:
        args.eventlog = config.get("main", "eventlog", fallback=None)
    if not args.connlog:
        args.connlog = config.get("watchlogs", "connlog", fallback=None)
    if not args.smtplog:
        args.smtplog = config.get("watchlogs", "smtplog", fallback=None)
    if not args.httplog:
        args.httplog = config.get("watchlogs", "httplog", fallback=None)

    for whitelist_type in whitelists:
        if whitelist_type in config.keys():
            for item in config[whitelist_type]:
                whitelists[whitelist_type][config.get(whitelist_type, item)] = item

    return args, whitelists
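
A standalone sketch of the whitelist loop's inversion (hypothetical ini data): the option's value becomes the dict key and the option name becomes the value.

import configparser

config = configparser.ConfigParser()
config.read_string("[conn_dest_whitelist_ips]\noffice = 192.0.2.10\n")

whitelists = {"conn_dest_whitelist_ips": {}}
for whitelist_type in whitelists:
    if whitelist_type in config.keys():
        for item in config[whitelist_type]:
            whitelists[whitelist_type][config.get(whitelist_type, item)] = item

print(whitelists)  # {'conn_dest_whitelist_ips': {'192.0.2.10': 'office'}}
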
Example #20
File: Config.py Project: hikerell/Sloth
def setup_config(default_path='project.yaml'):
    secret = None
    path = default_path
    if os.path.exists(path):
        with open(path, 'rt') as f:
            config = yaml.safe_load(f.read())
        setGlobalConfig(config)
    else:
        setGlobalConfig({'X':'Y'})
    config = getGlobalConfig()
    for ckey in config.keys():
        c = config[ckey]
        if 'Secure' in c.keys() and c['Secure']:
            if not secret:
                secret = input('config secret:')
            for k in c.keys():
                if k != 'Secure':
                    c[k] = _decrypt(secret, c[k])
                    print('decrypt c[k]: %s' % str(c[k]))
    print(getGlobalConfig())
Example #21
def check_config(config: Dict[str, Any]) -> None:
    """
    Lightly check the configuration and issue warnings as appropriate.
    Args:
        config: The loaded configuration.
    """
    UNDERSTOOD_CONFIG_FIELDS = CONFIG_DEFAULTS.keys()

    def check_section(
        section_name: str, known_keys: Set[str], cfgpart: Dict[str, Any] = config
    ) -> None:
        nonunderstood = set(cfgpart[section_name].keys()).difference(known_keys)
        if len(nonunderstood) > 0:
            logger.warning(
                f"The following configuration fields in '{section_name}' "
                f"are not understood: %s",
                nonunderstood,
            )

    nonunderstood = set(config.keys()).difference(UNDERSTOOD_CONFIG_FIELDS)
    if len(nonunderstood) > 0:
        logger.warning(
            "The following configuration sections are not understood: %s", nonunderstood
        )

    check_section("http", {"port", "bind_addresses"})
    check_section("log", {"setup", "access"})
    check_section(
        "access", {"file", "enabled", "x_forwarded_for"}, cfgpart=config["log"]
    )
    check_section("metrics", {"opentracing", "sentry", "prometheus"})
    check_section(
        "opentracing",
        {"enabled", "implementation", "jaeger", "service_name"},
        cfgpart=config["metrics"],
    )
    check_section(
        "prometheus", {"enabled", "address", "port"}, cfgpart=config["metrics"]
    )
    check_section("sentry", {"enabled", "dsn"}, cfgpart=config["metrics"])
Example #22
    def __init__(self, device_config_file, logger_config_file):
        if not os.path.exists(device_config_file) or \
           not os.path.exists(logger_config_file):
            raise FileNotFoundError("Config files do not exist.")

        self.filename = device_config_file

        # load device information.
        config = configparser.ConfigParser()
        config.read(device_config_file)

        if 'DEVICE' in config.keys():
            self._device = config['DEVICE']
        self._config = config

        # load logger config.
        with open(logger_config_file, 'r') as f:
            logger_config = json.load(f)

        logging.config.dictConfig(logger_config)
        self._logger = logging.getLogger('device')
Example #23
def choose_source():
    """
    allow user to select from which source they wish to download from

    :config:  dict of various elements required to trek and download from selected source
    :class:   globals allows us to get a class eg. Westminster which we can then instantiate
    """

    with open('configs/config.yaml', 'r') as f:
        config = yaml.safe_load(f)
    sources = list(config.keys())

    print("which source would you like to choose from?\n")
    for i, x in enumerate(sources):
        print('\t', i, x)
    selection = int(input('\n'))

    if selection not in range(0, len(sources)):
        print("try again")
        return choose_source()

    name = str(sources[selection])
    return config[name], globals()[name]
Example #24
File: server.py Project: qbicsoftware/dync
def _check_config(config):
    for key in ['address', 'storage', 'logging']:
        if key not in config.keys():
            raise ConfigException("Setting missing for: {}".format(key))
Example #25
def setup_logger_yaml(yaml_path: str,
                      separate_default_logs: bool = False,
                      allow_basic: bool = None) -> None:
    """
    This function sets up a logger for the program using a YAML file.\\
    The configuration must be setup with a YAML file.\\
    This method is the best method for using logging in additional modules.\\

    Default Path Option Notes:
    \t\\- Default file log handler paths are supported.\\
    \t\\- Cross-platform usage can be a pain and require the path to be the full path.\\
    \t\\- Having default enabled allows the program to set the filename for each log handler.\\
    \t\\- This function allows the ability to have all file log handlers log to the same file,\\
    \t   which is named the same name as the main program, or be individual log files\\
    \t   per file handler, which will be named based on the file handler key name.\\
    \t\\- The "filename:" key value has to be "DEFAULT" in all caps to work.

    Additional Default Option Notes:
    \t\\- A user can define DEFAULT path logs by adding :<log name> to the end of DEFAULT.\\
    \t\\- All default logs will be at the root of the main program in a folder called logs.\\
    \t\t\\- default YAML example1 = filename: DEFAULT\\
    \t\t\\- default YAML example2 = filename: DEFAULT:mylog

    Usage:
    \t\\- Setup your logger by running the command below.\\
    \t\t\\- logger = logging.getLogger(__name__)\\
    \t\\- Call this function to setup the logger. No return is required.\\
    \t\\- Call the logger using something similar to the command below.\\
    \t\t\\- logger.info('testing')\\
    \t\\- When using the same logger in other modules the only requirement is to run the command\\
    \t   below within the function. Do not run at the module level. This can cause issues.

    Args:
        yaml_path (str):
        \t\\- yaml configuration file.\\
        separate_default_logs (bool, optional):\\
        \t\\- If default file handlers are being used this allows the files to be separated\\
        \t   using the file handler YAML key name.\\
        \t\t\\- Defaults to False.\\
        \t\t\\- Note:\\
        \t\t\t\\- Default log paths per file handler can only be enabled by setting the key value\\
        \t   for filename: to DEFAULT.\\
        allow_basic (bool, optional):\\
        \t\\- Allows the default log level of "INFO" to be used if the YAML file configuration\\
        \t   fails when set to "True".

    Raises:
        FTypeError (fexception):
        \t\\- The value '{yaml_path}' is not in <class 'str'> format.
        FTypeError (fexception):
        \t\\- The value '{separate_default_logs}' is not in <class 'bool'> format.
        FTypeError (fexception):
        \t\\- The value '{allow_basic}' is not in <class 'bool'> format.
        LoggerSetupFailure:
        \t\\- The logging handler failed to create.
        FGeneralError (fexception):
        \t\\- A general exception occurred during the logger setup.
    """

    try:
        type_check(yaml_path, str)
        if separate_default_logs:
            type_check(separate_default_logs, bool)
        if allow_basic:
            type_check(allow_basic, bool)
    except FTypeError:
        raise

    # Sets up the logger based on the YAML.
    try:
        # Calls function to pull in YAML configuration.
        config = read_yaml_config(yaml_path, 'FullLoader')

        # #######################################################################
        # ###########Checks/Sets Up Default File Logger Path If Required#########
        # #######################################################################
        # Gets YAML return keys.
        all_keys = list(config.keys())
        # Checks if the log handler is a key.
        if 'handlers' in all_keys:
            # Gets all handler keys.
            handler_keys = list(config['handlers'].keys())
            # Loops through each handler key.
            for handler_key in handler_keys:
                # Gets all handler setting keys for the specific handler entry.
                handler_setting_keys = list(
                    config['handlers'][handler_key].keys())
                # Loops through each handler setting.
                for setting_keys in handler_setting_keys:
                    # Checks if one of the keys contains filename to check if it needs the default log path set.
                    if setting_keys == 'filename':
                        # Gets the value from the filename: key.
                        filename_value = config['handlers'][handler_key][
                            'filename']
                        # Checks if the filename value is "DEFAULT" to set the log with the main program name.
                        if 'DEFAULT' == filename_value:
                            # Gets the main program path and file name of the program.
                            # Note: The main program path should not be pulled from the os.path.split command because it does not work correctly on Linux.
                            main_program_path = pathlib.Path.cwd()
                            main_program_file_name = os.path.split(
                                sys.argv[0])[1]
                            # Sets the program log path for the default log path in the YAML.
                            log_path = os.path.abspath(
                                f'{main_program_path}/logs')
                            # Check if main file path exists with a "logs" folder. If not create the folder.
                            # Checks if the save_log_path exists and if not it will be created.
                            # This is required because the logs do not save to the root directory.
                            if not os.path.exists(log_path):
                                os.makedirs(log_path)
                            # Checks if the user wants default log file handler files to be separate.
                            if separate_default_logs:
                                log_file_path = os.path.abspath(
                                    f'{log_path}/{handler_key}.log')
                            else:
                                # Removes the .py from the main program name
                                main_program_name = main_program_file_name.replace(
                                    '.py', '')
                                log_file_path = os.path.abspath(
                                    f'{log_path}/{main_program_name}.log')
                            # Update the file log handler file path to the main root.
                            config['handlers'][handler_key][
                                'filename'] = log_file_path
                        # Checks if the filename value is "DEFAULT:" to set the log with the user defined log name.
                        elif 'DEFAULT:' in filename_value:
                            # Gets the main program path.
                            # Note: The main program path should not be pulled from the os.path.split command because it does not work correctly on Linux.
                            main_program_path = pathlib.Path.cwd()
                            # Sets the program log path for the default log path in the YAML.
                            log_path = os.path.abspath(
                                f'{main_program_path}/logs')
                            # Check if main file path exists with a "logs" folder. If not create the folder.
                            # Checks if the save_log_path exists and if not it will be created.
                            # This is required because the logs do not save to the root directory.
                            if not os.path.exists(log_path):
                                os.makedirs(log_path)
                            # Checks if the user wants default log file handler files to be separate.
                            if separate_default_logs:
                                log_file_path = os.path.abspath(
                                    f'{log_path}/{handler_key}.log')
                            else:
                                # Removes the .py from the main program name
                                # Original Example: DEFAULT:mylog
                                # Returned Example: mylog
                                user_defined_log_name = filename_value.split(
                                    ':')[1]
                                log_file_path = os.path.abspath(
                                    f'{log_path}/{user_defined_log_name}.log')
                            # Update the file log handler file path to the main root.
                            config['handlers'][handler_key][
                                'filename'] = log_file_path
        # Sets the logging configuration from the YAML configuration.
        logging.config.dictConfig(config)
    except Exception as exc:
        # Checks if allow_basic is enabled to set up default "INFO" logging.
        if allow_basic:
            # Sets the basic logger setup configuration.
            logging.basicConfig(level=logging.INFO)
        else:
            if 'Unable to configure handler' in str(exc):
                exc_args = {
                    'main_message': 'The logging handler failed to create.',
                    'custom_type': LoggerSetupFailure,
                    'suggested_resolution': 'Please verify YAML file configuration.',
                }
                raise LoggerSetupFailure(FCustomException(exc_args))
            else:
                exc_args = {
                    'main_message': 'A general exception occurred during the logger setup.',
                    'original_exception': exc,
                }
                raise FGeneralError(exc_args)
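
For context, a handler entry that this function rewrites might parse to something like the following (a hypothetical YAML-derived dict; 'DEFAULT:mylog' would instead produce logs/mylog.log):

config = {
    'version': 1,
    'handlers': {
        'file': {
            'class': 'logging.FileHandler',
            'filename': 'DEFAULT',  # rewritten to <cwd>/logs/<program name>.log
        },
    },
}
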
Example #26
    def get_mrouter(self):
        config = self._config
        if 'MROUTER' not in config.keys():
            return None
        return config['MROUTER']
Example #27
    def get_server(self):
        config = self._config
        if 'SERVER' not in config.keys():
            return None
        return config['SERVER']
Example #28
def main():
    parser = argparse.ArgumentParser()
    root_dir = pathlib.Path(sys.argv[0]).parent
    parser.add_argument(
        "sites",
        nargs="*",
        help="site name in the config yaml file. with no input, all sites are "
        "selected. if specified explicitly, a site is executed regardless of "
        "its 'enable' value.")
    parser.add_argument("--view-config", action="store_true")
    parser.add_argument("--config-yaml-filename",
                        default=root_dir / "recipe_crawler_config.yml",
                        type=pathlib.Path)
    parser.add_argument("--work-dir",
                        default=root_dir / ".work_recipes",
                        type=pathlib.Path,
                        help="working directory")
    parser.add_argument("--credential-json-filename",
                        default=root_dir / "recipe_crawler_cred.json",
                        type=pathlib.Path)
    parser.add_argument(
        "--no-check-existed-note",
        action="store_true",
        help="do not check for existing notes; always append a new note. "
        "if unset, existing notes are checked and skipped.")
    parser.add_argument("--processed-list-filename-postfix",
                        default="_processed_data.txt")
    parser.add_argument(
        "--use-local",
        action="store_true",
        help="store local enex file. do not sync cloud evernote")

    args = parser.parse_args()
    args.work_dir.mkdir(parents=True, exist_ok=True)

    if not args.config_yaml_filename.exists():
        logger.error("not exists config file: {}".format(
            args.config_yaml_filename))
        return

    crawlers = [
        crawler_clazz() for _, crawler_clazz in inspect.getmembers(
            recipe_crawler.crawlers, inspect.isclass)
        if issubclass(crawler_clazz,
                      recipe_crawler.crawlers.bases.RecipeCrawlerTemplate)
        and not inspect.isabstract(crawler_clazz)
    ]
    crawlers_map = dict([(crawler.site_name, crawler) for crawler in crawlers])

    config = yaml.safe_load(args.config_yaml_filename.open("r").read())
    if args.view_config:
        view_results = dict()
        for site, site_config in config.items():
            if len(args.sites):
                for a_site in args.sites:
                    if a_site not in view_results:
                        view_results[a_site] = list()
                    if a_site in site:
                        view_results[a_site].append((site, site_config))
            else:
                if "" not in view_results:
                    view_results[""] = list()
                view_results[""].append((site, site_config))
        pprint.pprint(view_results)
        return

    evernote_cred = _get_evernote_credential(args.credential_json_filename)
    # change_tag_evernote(args, evernote_cred)

    if args.sites is None or len(args.sites) == 0:
        args.sites = [
            key for key in config.keys() if config[key].get("enable", True)
        ]  # True if 'enable' is omitted

    for site in args.sites:
        if site in config and site in crawlers_map:
            site_config = config[site]

            crawler = crawlers_map[site]
            crawler.init(args, site_config)
            recipe_pickle_dir = crawler.cache_dir / "_pickle"
            recipe_pickle_dir.mkdir(parents=True, exist_ok=True)

            if args.use_local:
                logger.info("store local enex")
                enexs = list()
                processed_recipes = list()
                for recipe, (enex_title,
                             enex) in create_enex(crawler.process, args,
                                                  site_config):
                    store_local(recipe_pickle_dir, recipe)
                    enexs.append(enex)
                    processed_recipes.append(recipe)

                enex_dir = args.work_dir / "_enex"
                enex_dir.mkdir(parents=True, exist_ok=True)
                if len(enexs):
                    store_local_enex(enex_dir, site_config["program_name"],
                                     enexs)

                for processed_recipe in processed_recipes:
                    with crawler.processed_list_filename.open("a") as fp:
                        fp.write("{}\n".format(processed_recipe.id))
            else:
                for recipe in store_evernote(
                        crawler.process,
                        args,
                        site_config,
                        evernote_cred,
                        is_note_exist_check=not args.no_check_existed_note):
                    if recipe:
                        with crawler.processed_list_filename.open("a") as fp:
                            fp.write("{}\n".format(recipe.id))

                        store_local(recipe_pickle_dir, recipe)
        else:
            logger.warning("not exist: {}".format(site))
Example #29
    def __init__(self, config):
        # Default Config
        # Levels from logging._levelToName are
        # {50: 'CRITICAL', 40: 'ERROR', 30: 'WARNING', 20: 'INFO', 10: 'DEBUG', 0: 'NOTSET'}

        # Levels = DEBUG, INFO, WARNING, ERROR, CRITICAL
        default = {
            'console': {
                'enabled': True,
                'level': 'DEBUG',
                'colors': True,
                'format': '%(message)s',
                'filters': [],
                'exclude': [],
            },
            'file': {
                'enabled': False,
                'level': 'DEBUG',
                'file': '/tmp/example.log',
                'when': 'midnight',
                'interval': 1,
                'backup_count': 7,
                'format':
                '%(asctime)s.%(msecs)03d | %(levelname)-8s | %(name)-22s | %(message)s',
                'filters': [],
                'exclude': [],
            }
        }

        # Merge default and user defined config
        config = {**default, **config}
        if 'console' in config.keys():
            config['console'] = {**default['console'], **config['console']}
        if 'file' in config.keys():
            config['file'] = {**default['file'], **config['file']}

        # New Logger
        self._logger = logging.getLogger()
        self._logger.setLevel(logging.DEBUG)
        self._name = None

        # New Console Handler
        if config['console']['enabled']:
            handler = logging.StreamHandler(stream=sys.stdout)
            handler.setLevel(config['console']['level'])
            if config['console']['colors']:
                handler.setFormatter(
                    ColoredFormatter(config['console']['format']))
            else:
                handler.setFormatter(
                    logging.Formatter(fmt=config['console']['format'],
                                      datefmt='%Y-%m-%d %H:%M:%S'))
            handler.addFilter(
                OutputFilter(config['console']['filters'],
                             config['console']['exclude']))
            self._logger.addHandler(handler)

        # New File Handler
        if config['file']['enabled']:
            #class logging.handlers.TimedRotatingFileHandler(filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None, errors=None)
            #handler = logging.FileHandler(filename=config['file']['file'], mode='a')
            handler = logging.handlers.TimedRotatingFileHandler(
                filename=config['file']['file'],
                when=config['file']['when'],
                interval=config['file']['interval'],
                backupCount=config['file']['backup_count'])
            handler.setLevel(config['file']['level'])
            handler.setFormatter(
                logging.Formatter(fmt=config['file']['format'],
                                  datefmt='%Y-%m-%d %H:%M:%S'))
            #if config['file'].get('filter'): handler.addFilter(logging.Filter(name=config['file']['filter']))
            handler.addFilter(
                OutputFilter(config['file']['filters'],
                             config['file']['exclude']))
            self._logger.addHandler(handler)

        self.config = config
Example #30
'''
Created on Nov 12, 2018

@author: colin
'''
#===============================================================================
# Setup  Logging
#===============================================================================
import logging
import logging.config
logger = logging.getLogger('Main')

LEVEL = 'INFO'

logging.basicConfig(format='%(asctime)s %(name)s:%(levelname)s:%(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p',
                    level=LEVEL)

from CardStack import CardViewer
from CardStack import CardConfig

config = CardConfig('config')
message = config.info()
load = config.load(
    '/Users/colin/Documents/eclipse-workspace/JobCard3/config/config.yaml')
write = config.write('/tmp/output.yaml')

print(config.keys())
build = config.build()