Example #1
def dashboard():
    """Primary dashboard the users will interact with."""
    logger.info("User: {} authenticated proceeding to dashboard.".format(
        session.get("id_token")["sub"]))

    if "Mozilla-LDAP" in session.get("userinfo")["sub"]:
        logger.info(
            "Mozilla IAM user detected. Attempting to enrich with ID-Vault data.")
        try:
            session["idvault_userinfo"] = person_api.get_userinfo(
                session.get("id_token")["sub"])
        except Exception as e:
            logger.error(
                "Could not enrich profile due to: {}. Perhaps it doesn't exist?"
                .format(e))

    # Hotfix to set user id for Firefox alert
    # XXXTBD Refactor rules later to support full id_conformant session
    session["userinfo"]["user_id"] = session.get("id_token")["sub"]

    # Transfer any updates into the app_tiles.
    S3Transfer(config.Config(app).settings).sync_config()

    # Send the user session and browser headers to the alert rules engine.
    Rules(userinfo=session["userinfo"], request=request).run()

    user = User(session, config.Config(app).settings)
    apps = user.apps(Application(app_list.apps_yml).apps)

    return render_template("dashboard.html",
                           config=app.config,
                           user=user,
                           apps=apps,
                           alerts=None)
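
Note: these dashboard examples all assume that config.Config(app).settings exposes a Flask-compatible settings object consumable by app.config.from_object(). A minimal sketch of such a wrapper (hypothetical names and defaults, not any project's actual implementation):

# Hypothetical sketch of the Config/settings interface these examples assume.
# The real dashboard Config differs; names and defaults are illustrative only.
import os

class Settings:
    """Attribute-style settings object for app.config.from_object()."""
    def __init__(self):
        self.DEBUG = os.environ.get("DEBUG", "False") == "True"
        self.SECRET_KEY = os.environ.get("SECRET_KEY", "change-me")

class Config:
    """Binds settings to a Flask app, mirroring config.Config(app).settings."""
    def __init__(self, app=None):
        self.app = app
        self.settings = Settings()
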
Example #2
def dashboard():
    """Primary dashboard the users will interact with."""
    logger.info("User: {} authenticated proceeding to dashboard.".format(session.get('id_token')['sub']))

    if "Mozilla-LDAP" in session.get('userinfo')['sub']:
        logger.info("Mozilla IAM user detected. Attempt enriching with ID-Vault data.")
        try:
            session['idvault_userinfo'] = person_api.get_userinfo(session.get('id_token')['sub'])
        except Exception:
            logger.error("Could not enrich profile. Perhaps it doesn't exist?")

    # Transfer any updates into the app_tiles.
    S3Transfer(config.Config(app).settings).sync_config()

    # Send the user session and browser headers to the alert rules engine.
    Rules(userinfo=session['userinfo'], request=request).run()

    user = User(session, config.Config(app).settings)
    apps = user.apps(Application().apps)

    return render_template(
        'dashboard.html',
        config=app.config,
        user=user,
        apps=apps,
        alerts=None
    )
Example #3
    def __init__(self, cookies_path, config_path, max_retries=5, memory_file=None):
        self.Exceptions = HangupsBotExceptions()

        self.shared = {} # safe place to store references to objects

        self._client = None
        self._cookies_path = cookies_path
        self._max_retries = max_retries

        # These are populated by on_connect when it's called.
        self._conv_list = None # hangups.ConversationList
        self._user_list = None # hangups.UserList
        self._handlers = None # handlers.py::EventHandler

        self._cache_event_id = {} # workaround for duplicate events

        self._locales = {}

        # Load config file
        try:
            self.config = config.Config(config_path)
        except ValueError:
            logging.exception("failed to load config, malformed json")
            sys.exit()

        # set localisation if anything is defined in config.language or ENV[HANGOUTSBOT_LOCALE]
        _language = self.get_config_option('language') or os.environ.get("HANGOUTSBOT_LOCALE")
        if _language:
            self.set_locale(_language)

        # load previous memory, or create a new one
        self.memory = None
        if memory_file:
            _failsafe_backups = int(self.get_config_option('memory-failsafe_backups') or 3)
            _save_delay = int(self.get_config_option('memory-save_delay') or 1)

            logger.info("memory = {}, failsafe = {}, delay = {}".format(
                memory_file, _failsafe_backups, _save_delay))

            self.memory = config.Config(memory_file, failsafe_backups=_failsafe_backups, save_delay=_save_delay)
            if not os.path.isfile(memory_file):
                try:
                    logger.info("creating memory file: {}".format(memory_file))
                    self.memory.force_taint()
                    self.memory.save()

                except (OSError, IOError):
                    logger.exception('FAILED TO CREATE DEFAULT MEMORY FILE')
                    sys.exit()

        # Handle signals on Unix
        # (add_signal_handler is not implemented on Windows)
        try:
            loop = asyncio.get_event_loop()
            for signum in (signal.SIGINT, signal.SIGTERM):
                loop.add_signal_handler(signum, lambda: self.stop())
        except NotImplementedError:
            pass
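
The signal block at the end is a portable pattern worth isolating. A standalone sketch, assuming only the standard library (the function name is my own):

# Register SIGINT/SIGTERM handlers where supported; loop.add_signal_handler
# raises NotImplementedError on Windows event loops, so fall back silently.
import asyncio
import signal

def install_signal_handlers(stop_callback):
    loop = asyncio.get_event_loop()
    try:
        for signum in (signal.SIGINT, signal.SIGTERM):
            loop.add_signal_handler(signum, stop_callback)
    except NotImplementedError:
        pass  # e.g. the default Windows event loop
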
Example #4
def notifications():
    user = User(session, config.Config(app).settings)
    return render_template(
        'notifications.html',
        config=app.config,
        user=user,
    )
Example #5
def configure_logging(args):
    log_level = 'DEBUG' if args.debug else 'INFO'

    default_config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'console': {
                'format': '%(asctime)s %(levelname)s %(name)s: %(message)s',
                'datefmt': '%H:%M:%S'
            },
            'default': {
                'format': '%(asctime)s %(levelname)s %(name)s: %(message)s',
                'datefmt': '%Y-%m-%d %H:%M:%S'
            }
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'stream': 'ext://sys.stdout',
                'level': log_level,
                'formatter': 'console'
            },
            'file': {
                'class': 'logging.FileHandler',
                'filename': args.log,
                'level': log_level,
                'formatter': 'default',
            }
        },
        'loggers': {
            # root logger
            '': {
                'handlers': ['file', 'console'],
                'level': log_level
            },
            'requests': {
                'level': 'WARNING'
            },
            'urllib3': {
                'level': 'WARNING'
            },
            'plugins': {
                'level': 'DEBUG'
            }
        }
    }

    logging_config = default_config

    bootcfg = config.Config(args.config)
    if bootcfg.exists(["logging.system"]):
        logging_config = bootcfg["logging.system"]

    logging.config.dictConfig(logging_config)

    logger = logging.getLogger()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    logger.warning("log_level is {}".format(log_level))
Example #6
def styleguide_notifications():
    user = FakeUser(config.Config(app).settings)
    return render_template(
        'notifications.html',
        config=app.config,
        user=user,
    )
Example #7
def styleguide_dashboard():
    user = FakeUser(config.Config(app).settings)
    apps = user.apps(Application(app_list.apps_yml).apps)

    return render_template(
        "dashboard.html", config=app.config, user=user, apps=apps, alerts=None
    )
Example #8
def run_bot(config_path):
    conf = config.Config(config_path)
    bot = telebot.TeleBot(API_TOKEN)
    plugins.tracking.setting(bot, conf)
    plugins.load_user_plugins(bot)

    bot.polling(none_stop=True)
    conf.flush()
Example #9
def alert_faking():
    if request.method == 'GET':
        # Only allow alert faking in non-production environments.
        if app.config.get('SERVER_NAME') != 'sso.mozilla.com':
            user = User(session, config.Config(app).settings)
            fake_alerts = FakeAlert(user_id=user.userinfo.get('sub'))
            fake_alerts.create_fake_alerts()

    return redirect('/dashboard', code=302)
Example #10
def alert_operation(alert_id):
    if request.method == 'POST':
        user = User(session, config.Config(app).settings)
        result = user.acknowledge_alert(alert_id)

        if result['ResponseMetadata']['HTTPStatusCode'] == 200:
            return '200'
        else:
            return '500'
Example #11
def compactor_daemon(conf_file):
    """
    Run the compactor daemon.

    :param conf_file: Name of the configuration file.
    """

    eventlet.monkey_patch()
    conf = config.Config(conf_file=conf_file)
    compactor.compactor(conf)
Example #12
def cluster_up(args):
    """Ensure all nodes of an existing cluster are up.

    """
    conf = config.Config()
    if args.name not in conf.clusters:
        logging.error("Unknown cluster '%s'", args.name)
        return 1
    provider = conf.clusters[args.name]["provider"]
    return core.start_cluster(args.name, provider, conf)
Example #13
def ssh(args):
    """Open a `screen(1)` session connected to all cluster nodes.

    """
    conf = config.Config()
    if args.name not in conf.clusters:
        logging.error("Unknown cluster '%s'", args.name)
        return 1
    provider = conf.clusters[args.name]["provider"]
    return core.ssh_session(args.name, provider, conf)
Example #14
def provision_cluster(args):
    """Configures all nodes in a cluster.

    """
    conf = config.Config()
    if args.name not in conf.clusters:
        logging.error("Unknown cluster '%s'", args.name)
        return 1
    provider = conf.clusters[args.name]["provider"]
    return core.provision_cluster(args.name, provider, conf)
Example #15
def destroy_cluster(args):
    """Destroy a cluster.

    """
    conf = config.Config()
    if args.name not in conf.clusters:
        logging.error("Unknown cluster '%s'", args.name)
        return 1
    provider = conf.clusters[args.name]["provider"]
    return core.destroy_cluster(args.name, provider, conf)
Example #16
def remote_daemon(conf_file):
    """
    Run the external control daemon.

    :param conf_file: Name of the configuration file.
    """

    eventlet.monkey_patch()
    conf = config.Config(conf_file=conf_file)
    daemon = remote.RemoteControlDaemon(None, conf)
    daemon.serve()
Example #17
def cluster_env(args):
    """Prints the command-line environment for accessing a cluster.

    """
    conf = config.Config()
    if args.name not in conf.clusters:
        logging.error("Unknown cluster '%s'", args.name)
        return 1
    provider = conf.clusters[args.name]["provider"]
    for k, v in core.cluster_env(args.name, provider, conf):
        print "export %s=%s" % (k, v)
def cli(info, config_path):
    """a token based matrix registration app"""
    config.config = config.Config(config_path)
    logging.config.dictConfig(config.config.logging)
    app = info.load_app()
    with app.app_context():
        app.config.from_mapping(
            SQLALCHEMY_DATABASE_URI=config.config.db.format(cwd=f"{os.getcwd()}/"),
            SQLALCHEMY_TRACK_MODIFICATIONS=False
        )
        db.init_app(app)
        db.create_all()
        tokens.tokens = tokens.Tokens()
Example #19
File: log.py Project: xxoxx/cobra
    def __init__(self):
        logs_directory = config.Config('cobra', 'logs_directory').value
        logs_directory = os.path.join(config.Config().project_directory,
                                      logs_directory)
        if os.path.isdir(logs_directory) is not True:
            os.mkdir(logs_directory)
        filename = os.path.join(logs_directory, 'cobra.log')
        logging.config.dictConfig({
            'version': 1,
            'disable_existing_loggers': True,
            'formatters': {
                'verbose': {
                    'format':
                    "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
                    'datefmt': "%Y-%m-%d %H:%M:%S"
                },
                'simple': {
                    'format': '%(levelname)s %(message)s'
                },
            },
            'handlers': {
                'file': {
                    'level': 'DEBUG',
                    'class': 'cloghandler.ConcurrentRotatingFileHandler',
                    'maxBytes': 1024 * 1024 * 10,
                    'backupCount': 50,
                    'delay': True,
                    'filename': filename,
                    'formatter': 'verbose'
                }
            },
            'loggers': {
                '': {
                    'handlers': ['file'],
                    'level': 'DEBUG',
                },
            }
        })
Example #20
def alert_operation(alert_id):
    if request.method == "POST":
        user = User(session, config.Config(app).settings)
        helpfulness = None
        alert_action = None
        if request.data is not None:
            data = json.loads(request.data.decode())
            helpfulness = data.get("helpfulness")
            alert_action = data.get("alert_action")

        result = user.take_alert_action(alert_id, alert_action, helpfulness)

        if result["ResponseMetadata"]["HTTPStatusCode"] == 200:
            return "200"
        else:
            return "500"
Example #21
def alert_operation(alert_id):
    if request.method == 'POST':
        user = User(session, config.Config(app).settings)
        helpfulness = None
        alert_action = None
        if request.data is not None:
            data = json.loads(request.data.decode())
            helpfulness = data.get('helpfulness')
            alert_action = data.get('alert_action')

        result = user.take_alert_action(alert_id, alert_action, helpfulness)

        if result['ResponseMetadata']['HTTPStatusCode'] == 200:
            return '200'
        else:
            return '500'
Example #22
def create_cluster(args):
    """Create a cluster and start all its nodes.

    """
    conf = config.Config()
    if args.name in conf.clusters:
        logging.error("Cluster `%s` already exists", args.name)
        return 1

    network = ipaddress.ip_network(u"%s" % (args.flannel_network, ))
    subnet_length = args.flannel_subnet_length
    subnet_min = ipaddress.ip_network(u"%s/%d" %
                                      (args.flannel_subnet_min, subnet_length))
    subnet_max = ipaddress.ip_network(u"%s/%d" %
                                      (args.flannel_subnet_max, subnet_length))
    services_ip_range = ipaddress.ip_network(u"%s" %
                                             (args.services_ip_range, ))
    dns_service_ip = ipaddress.ip_address(u"%s" % (args.dns_service_ip, ))
    kubernetes_service_ip = ipaddress.ip_address(
        u"%s" % (args.kubernetes_service_ip, ))
    for net in (subnet_min, subnet_max):
        if net.prefixlen != subnet_length:
            logging.error("Network %s is not a /%d network", net,
                          subnet_length)
            return 1
        if not net.subnet_of(network):
            logging.error("Network %s is not a subnet of %s", net, network)
            return 1
    if services_ip_range.overlaps(network):
        logging.error("Service IP range %s overlaps with network %s",
                      services_ip_range, network)
        return 1
    if dns_service_ip not in services_ip_range:
        logging.error("DNS service IP address %s not in service IP range %s",
                      dns_service_ip, services_ip_range)
        return 1
    if kubernetes_service_ip not in services_ip_range:
        logging.error(
            "Kubernetes API IP address %s not in service IP range %s",
            kubernetes_service_ip, services_ip_range)
        return 1

    provider = get_provider(args.provider)
    core.create_cluster(args.name, args.channel, args.num_etcd, args.size_etcd,
                        args.num_workers, args.size_workers, provider,
                        args.location, network, subnet_length, subnet_min,
                        subnet_max, services_ip_range, dns_service_ip,
                        kubernetes_service_ip, conf)
    cluster_up(args)
    return provision_cluster(args)
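
The validation above leans entirely on the standard ipaddress module. A self-contained illustration of the same checks, using made-up networks (not values from any real cluster):

# Made-up example networks demonstrating the ipaddress checks used above.
import ipaddress

network = ipaddress.ip_network(u"10.1.0.0/16")
subnet_min = ipaddress.ip_network(u"10.1.0.0/24")
services = ipaddress.ip_network(u"10.2.0.0/24")
dns_ip = ipaddress.ip_address(u"10.2.0.10")

assert subnet_min.prefixlen == 24          # subnet length matches
assert subnet_min.subnet_of(network)       # the /24 lies inside the /16
assert not services.overlaps(network)      # service range is disjoint
assert dns_ip in services                  # DNS VIP drawn from service range
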
Example #23
def main():
    parser = argparse.ArgumentParser(
        description='a token based matrix registration app',
        prog='python -m matrix_registration')
    parser.add_argument('--config-path', default='config.yaml',
                        help='specifies the config file to be used', metavar='PATH')

    # subparser
    subparsers = parser.add_subparsers(
        help='sub-commands, e.g. \'gen -h\' for additional help')

    # api-parser
    parser_a = subparsers.add_parser('api', help='start as api')
    parser_a.set_defaults(func=run_api)

    # generate-parser
    parser_g = subparsers.add_parser('gen',
                                     help='generate new token. ' +
                                     '-o onetime, -e expire date')
    parser_g.add_argument('-o', '--one-time', action='store_true',
                          help='make token one-time-usable')
    parser_g.add_argument('-e', '--expire', type=str, default=None,
                          help='expire date: DD.MM.YYYY')
    parser_g.set_defaults(func=generate_token)

    # status-parser
    parser_s = subparsers.add_parser('status',
                                     help='view status or disable ' +
                                     'token. -s status, -d disable, -l list')
    parser_s.add_argument('-s', '--status', type=str, default=None,
                          help='token status')
    parser_s.add_argument('-l', '--list', action='store_true',
                          help='list tokens')
    parser_s.add_argument('-d', '--disable', type=str, default=None,
                          help='disable token')
    parser_s.set_defaults(func=status_token)

    args = parser.parse_args()

    config.config = config.Config(args.config_path)
    logging.config.dictConfig(config.config.logging)
    tokens.tokens = tokens.Tokens()

    logger = logging.getLogger(__name__)

    logger.debug('called with args: %s', args)
    if 'func' in args:
        args.func(args)
Example #24
    def __init__(self, args, operating_system):
        """Create an instance of the controller passing in the debug flag,
        the options and arguments from the cli parser.

        :param argparse.Namespace args: Command line arguments
        :param str operating_system: Operating system name from helper.platform

        """
        self.set_state(self.STATE_INITIALIZING)
        self.args = args
        try:
            self.config = config.Config(args.config)
        except ValueError:
            sys.exit(1)
        self.debug = args.foreground
        logging.config.dictConfig(self.config.logging)
        self.operating_system = operating_system
        self.pending_signals = multiprocessing.Queue()
Example #25
def dump_limits(conf_file, limits_file, debug=False):
    """
    Dump the current limits from the Redis database.

    :param conf_file: Name of the configuration file, for connecting
                      to the Redis database.
    :param limits_file: Name of the XML file that the limits will be
                        dumped to.  Use '-' to dump to stdout.
    :param debug: If True, debugging messages are emitted while
                  dumping the limits.
    """

    # Connect to the database...
    conf = config.Config(conf_file=conf_file)
    db = conf.get_database()
    limits_key = conf['control'].get('limits_key', 'limits')

    # Now, grab all the limits
    lims = [
        limits.Limit.hydrate(db, msgpack.loads(lim))
        for lim in db.zrange(limits_key, 0, -1)
    ]

    # Build up the limits tree
    root = etree.Element('limits')
    limit_tree = etree.ElementTree(root)
    for idx, lim in enumerate(lims):
        if debug:
            print >> sys.stderr, "Dumping limit index %d: %r" % (idx, lim)
        make_limit_node(root, lim)

    # Write out the limits file
    if limits_file == '-':
        limits_file = sys.stdout
    if debug:
        print >> sys.stderr, "Dumping limits to file %r" % limits_file
    limit_tree.write(limits_file,
                     xml_declaration=True,
                     encoding='UTF-8',
                     pretty_print=True)
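
make_limit_node is referenced but not shown in this excerpt. A plausible stand-in (an assumption on my part, not this project's actual helper), assuming each limit can be serialized from its instance attributes:

# Hypothetical make_limit_node; the real helper may serialize differently.
from lxml import etree

def make_limit_node(root, lim):
    """Append an XML node for one limit under the <limits> root."""
    node = etree.SubElement(root, 'limit', {'class': lim.__class__.__name__})
    for name, value in sorted(vars(lim).items()):
        attr = etree.SubElement(node, 'attr', {'name': name})
        attr.text = str(value)
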
Example #26
def configure_logging(args):
    """Configure Logging

    If the user specified a logging config file, open it, and
    fail if unable to open. If not, attempt to open the default
    logging config file. If that fails, move on to basic
    log configuration.
    """

    log_level = 'DEBUG' if args.debug else 'INFO'

    default_config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'console': {
                'format': '%(asctime)s %(levelname)s %(name)s: %(message)s',
                'datefmt': '%H:%M:%S'
            },
            'default': {
                'format': '%(asctime)s %(levelname)s %(name)s: %(message)s',
                'datefmt': '%Y-%m-%d %H:%M:%S'
            }
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'stream': 'ext://sys.stdout',
                'level': 'INFO',
                'formatter': 'console'
            },
            'file': {
                'class': 'logging.FileHandler',
                'filename': args.log,
                'level': log_level,
                'formatter': 'default',
            }
        },
        'loggers': {
            # root logger
            '': {
                'handlers': ['file', 'console'],
                'level': log_level
            },

            # requests is freakishly noisy
            'requests': {
                'level': 'INFO'
            },

            # XXX: suppress erroneous WARNINGs until resolution of
            #   https://github.com/tdryer/hangups/issues/142
            'hangups': {
                'level': 'ERROR'
            },

            # asyncio's debugging logs are VERY noisy, so adjust the log level
            'asyncio': {
                'level': 'WARNING'
            },

            # hangups log is verbose too, suppress so we can debug the bot
            'hangups.conversation': {
                'level': 'ERROR'
            }
        }
    }

    # Temporarily bring in the configuration file, just so we can configure
    # logging before bringing anything else up. There is no race internally:
    # if logging is called before it is configured, output goes to stderr,
    # and we will configure it soon enough.
    bootcfg = config.Config(args.config)
    if bootcfg.exists(["logging.system"]):
        logging.config.dictConfig(bootcfg["logging.system"])
    else:
        logging.config.dictConfig(default_config)

    logger = logging.getLogger()
    if args.debug:
        logger.setLevel(logging.DEBUG)
Example #27
from models.user import User
from op.yaml_loader import Application
from models.alert import Rules
from models.tile import S3Transfer

logging.basicConfig(level=logging.INFO)

with open('logging.yml', 'r') as log_config:
    config_yml = log_config.read()
    # yaml.safe_load: calling yaml.load without an explicit Loader is unsafe
    # and deprecated in current PyYAML.
    config_dict = yaml.safe_load(config_yml)
    logging.config.dictConfig(config_dict)

logger = logging.getLogger('sso-dashboard')

app = Flask(__name__)
app.config.from_object(config.Config(app).settings)

S3Transfer(config.Config(app).settings).sync_config()

assets = Environment(app)

js = Bundle('js/base.js', filters='jsmin', output='js/gen/packed.js')
assets.register('js_all', js)


sass = Bundle('css/base.scss', filters='scss')
css = Bundle(sass, filters='cssmin', output='css/gen/all.css')
assets.register('css_all', css)

# Hack to support serving .svg
mimetypes.add_type('image/svg+xml', '.svg')
Example #28
def configure_logging(args):
    """Configure Logging

    If the user specified a logging config file, open it, and
    fail if unable to open. If not, attempt to open the default
    logging config file. If that fails, move on to basic
    log configuration.
    """

    log_level = 'DEBUG' if args.debug else 'INFO'

    default_config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'console': {
                'format': '%(asctime)s %(levelname)s %(name)s: %(message)s',
                'datefmt': '%H:%M:%S'
            },
            'default': {
                'format': '%(asctime)s %(levelname)s %(name)s: %(message)s',
                'datefmt': '%Y-%m-%d %H:%M:%S'
            }
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'stream': 'ext://sys.stdout',
                'level': 'INFO',
                'formatter': 'console'
            },
            'file': {
                'class': 'logging.FileHandler',
                'filename': args.log,
                'level': log_level,
                'formatter': 'default',
            }
        },
        'loggers': {
            # root logger
            '': {
                'handlers': ['file', 'console'],
                'level': log_level
            },

            # requests is freakishly noisy
            'requests': {
                'level': 'INFO'
            },

            # XXX: suppress erroneous WARNINGs until resolution of
            #   https://github.com/tdryer/hangups/issues/142
            'hangups': {
                'level': 'ERROR'
            },

            # asyncio's debugging logs are VERY noisy, so adjust the log level
            'asyncio': {
                'level': 'WARNING'
            },

            # hangups log is verbose too, suppress so we can debug the bot
            'hangups.conversation': {
                'level': 'ERROR'
            }
        }
    }

    logging_config = default_config

    # Temporarily bring in the configuration file, just so we can configure
    # logging before bringing anything else up. There is no race internally:
    # if logging is called before it is configured, output goes to stderr,
    # and we will configure it soon enough.
    bootcfg = config.Config(args.config)
    if bootcfg.exists(["logging.system"]):
        logging_config = bootcfg["logging.system"]

    if "extras.setattr" in logging_config:
        for class_attr, value in logging_config["extras.setattr"].items():
            try:
                [modulepath, classname,
                 attribute] = class_attr.rsplit(".", maxsplit=2)
                try:
                    setattr(class_from_name(modulepath, classname), attribute,
                            value)
                except ImportError:
                    logging.error("module {} not found".format(modulepath))
                except AttributeError:
                    logging.error("{} in {} not found".format(
                        classname, modulepath))
            except ValueError:
                logging.error("format should be <module>.<class>.<attribute>")

    logging.config.dictConfig(logging_config)

    logger = logging.getLogger()
    if args.debug:
        logger.setLevel(logging.DEBUG)
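
class_from_name is used but not defined in this excerpt. A plausible implementation built on importlib (an assumption, not the project's helper; it raises the same ImportError / AttributeError the caller above handles):

# Hypothetical implementation of the class_from_name helper referenced above.
import importlib

def class_from_name(module_path, class_name):
    """Import module_path and return its attribute class_name."""
    module = importlib.import_module(module_path)
    return getattr(module, class_name)
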
Example #29
logging.basicConfig(level=logging.INFO)

with open('logging.yml', 'r') as log_config:
    config_yml = log_config.read()
    # yaml.safe_load: calling yaml.load without an explicit Loader is unsafe
    # and deprecated in current PyYAML.
    config_dict = yaml.safe_load(config_yml)
    logging.config.dictConfig(config_dict)

logger = logging.getLogger('sso-dashboard')

app = Flask(__name__)

talisman = Talisman(app,
                    content_security_policy=DASHBOARD_CSP,
                    force_https=False)

app.config.from_object(config.Config(app).settings)
app_list = S3Transfer(config.Config(app).settings)
app_list.sync_config()

assets = Environment(app)
js = Bundle('js/base.js', filters='jsmin', output='js/gen/packed.js')
assets.register('js_all', js)

sass = Bundle('css/base.scss', filters='scss')
css = Bundle(sass, filters='cssmin', output='css/gen/all.css')
assets.register('css_all', css)

# Hack to support serving .svg
mimetypes.add_type('image/svg+xml', '.svg')

oidc_config = config.OIDCConfig()
Example #30
"""
    utils.log
    ~~~~~~~~~

    Implements log initialization

    :author:    Feei <*****@*****.**>
    :homepage:  https://github.com/wufeifei/cobra
    :license:   MIT, see LICENSE for more details.
    :copyright: Copyright (c) 2017 Feei. All rights reserved
"""
import os
import logging.config
from utils import config

logs_directory = config.Config('cobra', 'logs_directory').value
logs_directory = os.path.join(config.Config().project_directory, logs_directory)
if os.path.isdir(logs_directory) is not True:
    os.mkdir(logs_directory)
filename = os.path.join(logs_directory, 'cobra.log')
logging.config.dictConfig({
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'verbose': {
            'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
            'datefmt': "%Y-%m-%d %H:%M:%S"
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },