Example 1
def main(config_path):
    t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
    config_loader = ConfigLoader(config_path)
    args = config_loader.load()
    config = Config(args)

    inference_img = config.get_inference_img_path()
    threshold = config.get_inference_threshold()

    output = os.path.join(ROOT_FOLDER, config.get_inference_output())

    similarities = Similarities(config.get_same_region_path())
    saliency = Saliency(config.get_salience_path())
    fusion = Fusion(config.get_fusion_path(), config.get_fusion_model_type())

    similarities_model = similarities.get_model()

    img_data = Img_Data(inference_img)

    X = saliency.inference(img_data, similarities_model)
    Y = fusion.inference(X, img_data)
    cv2.imwrite("mask.png", Y)
    Y = grabcut_inference(inference_img, Y)
    # Y = inference(cv2.imread(inference_img), Y)
    cv2.imwrite("mask_postprocess.png", Y)
    result_img = get_transparent_image(inference_img, Y, threshold)
    if not os.path.isdir(output):
        os.mkdir(output)
    img_name = os.path.basename(inference_img).split(".")[0]
    result_path = os.path.join(output, img_name + ".png")
    cv2.imwrite(result_path, result_img)
    print("finished~( •̀ ω •́ )y")
    t1 = time.perf_counter() - t0
    print("Time elapsed: ", t1)
Example 2
    def __init__(self, deployment_name):
        self.logger = getLogger(self.__class__.__name__)
        self.deployment_name = deployment_name
        self.rabbit_config = ConfigLoader()
        deployment_properties = self.rabbit_config.get_deployment_properties(deployment_name=deployment_name)
        self.queue_list_config = deployment_properties['queues']
        self.vhost_config = deployment_properties['vHost']
        self.queues_body = self.get_queues_body(vhost=self.vhost_config, queue_list=self.queue_list_config)
Example 3
    def __init__(self, deployment_name):
        config.load_kube_config()
        self.logger = getLogger(self.__class__.__name__)
        self.v1 = client.AppsV1Api()
        self.rabbit_config = ConfigLoader()
        self.deployment_name = deployment_name
        self.scraper = QueuesScraper(deployment_name=deployment_name)
Example 4
    def get_spider(spider_name):
        """This function get a spider module from the given directory in scrapy-airflow.cfg

        :param spider_name: spider script name or spider name
        :return: spider module object
        """
        config_loader = ConfigLoader()
        spider_root = config_loader.get_setting('spider_path')
        spider_path = os.path.join(spider_root, spider_name)
        spider_module = SpiderLoader.module_loader(spider_path)
        return spider_module
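
For illustration, the loader might be called like this; both the enclosing class name and the spider file name are assumptions:

spider_module = SpiderRunner.get_spider("quotes_spider.py")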
Example 5
    def _map_scan_cfg(self):
        if self.args.cfg:
            self.cfg_path = self.args.cfg
        for map_n in self.args.scan:
            cfg = ConfigLoader(self.cfg_path + '/' + map_n + '.cfg')
            cfg.load()
            map_data = cfg.zbx.scan_map(map_n)
            scan_map = ConfigCreate(map_data, cfg.zbx)
            scan_map.create()
            scan_map.check_map(self.cfg_path)
            scan_map.save(self.cfg_path)

            del cfg, scan_map, map_data
Example 6
    def _map_img(self):
        if self.args.cfg:
            self.cfg_path = self.args.cfg
        if self.args.img:
            self.img_path = self.args.img
        for map_fn in self.args.map:
            cfg = ConfigLoader(self.cfg_path + '/' + map_fn)
            map_obj = cfg.create_map(self.font_path_fn, self.icon_path)
            map_obj.do()
            # map_obj.show()
            map_obj.save_img(self.img_path + '/' + map_fn[:-5] + '.png')

            if self.args.upload:
                cfg.upload(self.img_path + '/' + map_fn[:-5] + '.png')

            del cfg, map_obj
Example 8
class ScaleRunner:
    def __init__(self):

        self.config = ConfigLoader()
        self.interval = FileCache('interval', flag='cs')
        self.logger = getLogger(name=self.__class__.__name__)

    def scaling_deployment(self, deployment_name, namespace='default'):
        """
        :param str deployment_name:
        :param str namespace:
        :return:
        """
        while True:
            scaler = WorkerScaler(deployment_name=deployment_name)
            interval = self.config.get_deployment_properties(
                deployment_name=deployment_name)['interval']
            desired_pods = scaler.calculate_desired_pods()
            if desired_pods is None:
                self.logger.info(
                    f"Deployment {deployment_name} does not need to be scaled"
                )
            else:
                scaler.set_deployment_replicas(deployment_name=deployment_name,
                                               namespace=namespace,
                                               replicas_number=desired_pods)

            if interrupted:  # module-level flag, assumed to be set by a signal handler
                self.logger.info("We done here! Bye Bye")
                break

            gevent.sleep(interval)

    def asynchronous(self):
        threads = [
            gevent.spawn(self.scaling_deployment, deployment)
            for deployment in self.config.get_deployments()
        ]
        gevent.joinall(threads)
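
A minimal sketch of launching the runner, assuming this module is executed directly and any gevent monkey-patching is handled at startup:

if __name__ == "__main__":
    runner = ScaleRunner()
    runner.asynchronous()  # one greenlet per deployment listed in the config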
Example 9
# CONSTANTS
# ------------------------------------------------------------------------------

FRAME_OBJECT_INDEX = 0
FRAME_PATH_INDEX = 1
FRAME_LINE_NUMBER_INDEX = 2
FRAME_METHOD_NAME_INDEX = 3
FRAME_SNIPPET_INDEX = 4
DEFAULT_EMITTER_PORT = 7734
DEFAULT_EMITTER_HOST = '127.0.0.1'

#
# HELPER CLASSES (initialized on import)
# ------------------------------------------------------------------------------

PROFILER_CONFIG = ConfigLoader()
PROFILER_EMITTER = Emitter(
    PROFILER_CONFIG.get('telemetry_host', DEFAULT_EMITTER_HOST),
    PROFILER_CONFIG.get('telemetry_port', DEFAULT_EMITTER_PORT)
)

#
# STACK CAPTURE HOOK
# ------------------------------------------------------------------------------

""" Entry point monkey patched into the Django SQLCompiler class. Responsible for
    evaluating the call stack for execution specific code references and then
    emitting the entire payload for any listening clients to aggregate and display.

Args:
    orm_database_name: the Django connection name for the database this query ran against
Example 10
class QueuesScraper:

    def __init__(self, deployment_name):
        self.logger = getLogger(self.__class__.__name__)
        self.deployment_name = deployment_name
        self.rabbit_config = ConfigLoader()
        deployment_properties = self.rabbit_config.get_deployment_properties(deployment_name=deployment_name)
        self.queue_list_config = deployment_properties['queues']
        self.vhost_config = deployment_properties['vHost']
        self.queues_body = self.get_queues_body(vhost=self.vhost_config, queue_list=self.queue_list_config)

    def get_queues_body(self, vhost, queue_list):
        body_list = []
        host = self.get_rabbit_host_from_vhost(vhost)
        rabbit = self.rabbit_login(host)
        for queue in queue_list:
            body_list.append(rabbit.get_queue(vhost=vhost, name=queue))
        return body_list

    def total_messages(self):
        message_list = list()
        for body in self.exclude_idle_queue_from_list():
            message_list.append(body['messages'])
        return sum(message_list)

    def get_queues_average_consumer_utilisation(self):

        consumer_utilisation_list = []
        for queue_body in self.exclude_idle_queue_from_list():
            consumer_utilisation_list.append(queue_body['consumer_utilisation'])
        return sum(consumer_utilisation_list) / len(consumer_utilisation_list)

    def exclude_idle_queue_from_list(self):
        try:
            ttl = self.rabbit_config.get_deployment_properties(deployment_name=self.deployment_name)['ttl']
        except KeyError:
            self.logger.info("ttl not found in deployment %s config\nUsing default ttl = 1.0" % self.deployment_name)
            ttl = 1.0

        non_idle_queues_body = []
        for queue_body in self.queues_body:
            if self.check_queue_non_idling(queue_body=queue_body, ttl=ttl):
                non_idle_queues_body.append(queue_body)
        return non_idle_queues_body

    def check_queue_non_idling(self, queue_body, ttl):
        fmt = "%Y-%m-%d %H:%M:%S"

        if self.detect_stuck_messages_queue(queue_body=queue_body, ttl=ttl):
            return False

        try:
            idle_since = queue_body['idle_since']
            idle_since_time = datetime.strptime(idle_since, fmt)
            current_time = datetime.now(timezone.utc)
            current_time = current_time.replace(tzinfo=None)
            if (current_time - idle_since_time).total_seconds() / 60 > ttl and queue_body['consumers'] > 0:
                return False
            else:
                queue_body['consumer_utilisation'] = 0
                return True
        except KeyError:
            return True

    @staticmethod
    def detect_stuck_messages_queue(queue_body, ttl):
        past_queue = FileCache('message-queue', flag='cs')
        queue_name = queue_body['name']
        current_messages = queue_body['messages']
        current_consumers = queue_body['consumers']

        current_time = datetime.now(timezone.utc)
        current_time = current_time.replace(tzinfo=None)

        if past_queue.get(queue_name):
            time_range_minutes = (current_time - past_queue[queue_name]['time_catch']).total_seconds() / 60
            if past_queue[queue_name]['messages'] == current_messages:
                if time_range_minutes > ttl:
                    return True
                if time_range_minutes < ttl:
                    return False
            else:
                past_queue[queue_name] = {'messages': current_messages, 'time_catch': current_time,
                                          'consumers': current_consumers}
                return False
        else:
            past_queue[queue_name] = {'messages': current_messages, 'time_catch': current_time,
                                      'consumers': current_consumers}
            return False

    def get_rabbit_host_from_vhost(self, vhost, caching=True):
        if caching:
            vhost_host_cache = FileCache('vhost-host', flag='cs')
            if vhost_host_cache.get(vhost):
                return vhost_host_cache[vhost]
            else:
                vhost_host_cache[vhost] = self.get_host_action(vhost)
                return vhost_host_cache[vhost]
        else:
            return self.get_host_action(vhost)

    def rabbit_login(self, host):
        """
        :param str host:
        :return:
        """
        return Client(f'{host}:15672', self.rabbit_config.username, self.rabbit_config.password)

    def get_host_action(self, vhost):
        for host in self.rabbit_config.hosts:
            cl = Client(f'{host}:15672', self.rabbit_config.username, self.rabbit_config.password)
            try:
                cl.is_alive(vhost)
                return host
            except APIError:
                pass
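
Hypothetical usage of the scraper; the deployment name is illustrative and must exist in the RabbitMQ config:

scraper = QueuesScraper(deployment_name="email-worker")
print("messages in non-idle queues:", scraper.total_messages())
print("average consumer utilisation:", scraper.get_queues_average_consumer_utilisation())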
Example 11
def main():
    with LogcastCLI() as app:
        ConfigLoader()
        app.run()
Example 12
from models import Base
from models.account import Account
from models.role import Role
from models.permission import Permission

from framework import Config

parser = argparse.ArgumentParser(description="Process some integers.")
parser.add_argument(
    'action', choices=["create_db", "create_admin", "create_missing_tables"])
parser.add_argument('--config', required=False, type=str, default=None)

args = parser.parse_args()

config_loader = ConfigLoader()

if args.config:
    config_loader.load_from_path(args.config)
else:
    config_loader.load_from_default_path()

config = Config()
database_session_maker = config["database_session_maker"]


def drop_all_tables(engine):
    """
    Fix to enable SQLAlchemy to drop tables even if it didn't know about it.
    :param engine:
    :return:
Example 13
import sys

from config import ConfigLoader
from app import FirmQApp

# Config path comes from argv (for uwsgi:app) or falls back to the default:
if len(sys.argv) > 1:
    config_path = sys.argv[1]
else:
    config_path = "./conf/app.ini"

config_loader = ConfigLoader()
config_loader.load_from_path(config_path)

app = FirmQApp()
wsgi = app.getWsgi()

if __name__ == "__main__":
    print("Serving application at http://localhost:8000")
    app.serve()
Example 14
# -*- coding:utf-8 -*-

import pymysql
from collections import OrderedDict

import mysql_control as dbc
from config import ConfigLoader
from write_xls import Writer

config = ConfigLoader()
BENCHMARK_NAME = config.db_name.lower() + '_benchmark'
KEYWORD_BENCHMARK_NAME = config.db_name.lower() + '_keyword_benchmark'
SDK_RESULT_NAME = config.db_name.lower() + '_sdk_test'
NLU_RESULT_NAME = config.db_name.lower() + '_nlu_test'
KEYWORD_RESULT_NAME = config.db_name.lower() + '_keyword_test'
HOTWORD_RESULT_NAME = config.db_name.lower() + '_hotword_test'


def format_data(category, target, sdk_data_old, sdk_data_new, nlu_data_old,
                nlu_data_new):
    '''
        Merge multiple rows of data into a single row in Excel format.
    '''

    classify_result = [
        list(zip(row[0], row[1], row[2]))
        for row in list(zip(target, sdk_data_old, sdk_data_new))
    ]
    print(category, target, sdk_data_new, sdk_data_old, nlu_data_new,
          nlu_data_old)
Example 15
# -*- coding: UTF-8 -*-
import os
import sys
import pick
from status import Status
from subprocess import run
from config import ConfigLoader

OUTPUT_FILTERS_PATH = ConfigLoader().CFG['filters_configs_path']
FILTERS_DEPLOY_PATH = ConfigLoader().CFG['filters_deploy_folder']


class FilterDeploy(object):
    def __init__(self, filter_type):
        if self._ask_input('Do you want to deploy this filter? y/n : '):
            try:
                deploy_script = os.path.dirname(
                    os.path.realpath(__file__)) + '/contrib/deploy.sh'
                hub_binary = os.path.dirname(
                    os.path.realpath(__file__)) + '/contrib/hub'
                filter_folders = self.__list_folders(FILTERS_DEPLOY_PATH)
                title_environment = 'Please choose environment : '
                ENVIRONMENT, index_env = pick.pick(filter_folders,
                                                   title_environment)

                script = run([
                    deploy_script, ENVIRONMENT, filter_type,
                    FILTERS_DEPLOY_PATH, OUTPUT_FILTERS_PATH, hub_binary
                ])
                if script.returncode == 0:
                    Status.show(
Example 16
from glob import glob
from os.path import join

import torch

from config import ConfigLoader  # import path assumed, matching the other examples

# ---------- Directory Setting ----------

work_dir = './'
data_dir = join(work_dir, 'data')
conf_dir = join(work_dir, 'config')
out_dir = join(work_dir, 'out')

question_dir = join(data_dir, 'question')
synth_label_dir = join(data_dir, 'test_labels')

# ---------- Global Config ----------

types = ['duration', 'acoustic']
device = 'cuda' if torch.cuda.is_available() else 'cpu'
config = ConfigLoader(join(conf_dir, 'trn.cnf'))
question_file = join(question_dir, 'linguistic.hed')

# ---------- Data Load ----------

x_paths = {}
t_paths = {}
dataset = {}
loader = {}

for type_ in types:
    print('Loading %s dataset ... ' % (type_), end='')
    x_paths[type_] = glob(join(data_dir, 'x_' + type_, '*.bin'))
    t_paths[type_] = glob(join(data_dir, 't_' + type_, '*.bin'))

    x_dim = config.get_feature_config().get_linguistic_dim(type_)
Example 17
    def __init__(self):

        self.config = ConfigLoader()
        self.interval = FileCache('interval', flag='cs')
        self.logger = getLogger(name=self.__class__.__name__)
Example 18
from os import path
from status import Status
from config import ConfigLoader 
from jinja2 import Environment, FileSystemLoader

TEMPLATES_FOLDER = path.dirname(path.abspath(__file__)) + '/templates'
OUTPUT_FILTERS_PATH = ConfigLoader().CFG['filters_configs_path']

class FilterTemplates(object):

    def __init__(self, filter_name, date_key):
        self.jinja = Environment(loader=FileSystemLoader(TEMPLATES_FOLDER), trim_blocks=True)

        self._input_template(filter_name)
        self._filter_template(filter_name, date_key)
        self._output_template(filter_name)

    def _input_template(self, filter_name):
        input_file = '/etc/logstash/conf.d/input/' + filter_name + '.log'
        input_template_content = self._render('input.j2', type=filter_name, input_file=input_file)
        
        dest_file = OUTPUT_FILTERS_PATH + '/input.conf'
        self._create_file(input_template_content, dest_file)

    def _filter_template(self, filter_name, date_key):
        filter_template_content = self._render('filter.j2', type=filter_name, date_key=date_key)

        dest_file = OUTPUT_FILTERS_PATH + '/filter_' + filter_name + '.conf' 
        self._create_file(filter_template_content, dest_file)

    def _output_template(self, filter_name):
Example 19
    result = list(map(engine.add_margin, result))

    if 'min_price' in request.args:
        result = min(filter(lambda route: 'price' in route, result),
                     key=lambda route: route['price'])

    if result is not None:
        return json.dumps(result)
    else:
        abort(400)


@app.route('/create_booking', methods=['POST'])
def create_booking():
    conn_spec = request.json['connection']
    count = sum(
        map(lambda p: p['number_of_passengers'], request.json['passengers']))
    user_id = request.json['user_id']
    return json.dumps(engine.create_booking(conn_spec, count, user_id))


@app.route('/list_bookings')
def list_bookings():
    user_id = request.args.get('user_id')
    return json.dumps(engine.list_bookings(user_id))


if __name__ == '__main__':
    ConfigLoader().start()
    app.run(threaded=True)
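
For illustration, a client could exercise the booking endpoint like this; the payload shape is inferred from the handler above, and the values and default Flask port are assumptions:

import requests

payload = {
    "connection": {"from": "BER", "to": "LHR"},   # shape consumed by engine.create_booking
    "passengers": [{"number_of_passengers": 2}],  # the handler sums this field
    "user_id": 42,
}
resp = requests.post("http://localhost:5000/create_booking", json=payload)
print(resp.json())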
Example 20
import config.Logger as logging
from tasks import RealTimeTasks, SleepUntilTomorrow
import reporters.PushBulletManager as pbm
import time

# Assumed import paths for the rule loader and configurer (not shown in the
# original snippet); the config.* prefix mirrors the Logger import above.
import config.RuleLoader as RuleLoader
import config.Configurer as configurer

log = logging.getLogger()
log.info("Initializing RTSN")

symbols = []
# Map of stocks by symbol
stocks = {}
# Map of rules by symbol, populated by the loader
rules = RuleLoader.load()
config = configurer.load()

for symbol in rules:
    symbols.append(symbol)

log.debug("loaded Rules for " + str(symbols))
tasks = []

pb = pbm.create(config["push-bullet-api-key"])

while True:
    time.sleep(SleepUntilTomorrow.getTimeSecondsUntilNextOpening())
    for symbol in symbols:
        task = RealTimeTasks.Task(symbol, symbol, config["refresh-time"], rules[symbol], pb)
        tasks.append(task)
        task.start()
Example 21
class WorkerScaler:

    def __init__(self, deployment_name):
        config.load_kube_config()
        self.logger = getLogger(self.__class__.__name__)
        self.v1 = client.AppsV1Api()
        self.rabbit_config = ConfigLoader()
        self.deployment_name = deployment_name
        self.scraper = QueuesScraper(deployment_name=deployment_name)

    def calculate_desired_pods(self, namespace='default'):
        """
        :param str namespace:
        :return: number of desired pods
        """
        deployment_properties = self.rabbit_config.get_deployment_properties(deployment_name=self.deployment_name)
        max_pod = deployment_properties['maxPod']
        queues = self.scraper.exclude_idle_queue_from_list()

        try:
            accumulative_limit = deployment_properties['accumulativeLimit']
        except KeyError:
            self.logger.info("accumulativeLimit not found in deployment %s config\nUsing default accumulativeLimit = 1"
                             % self.deployment_name)
            accumulative_limit = 1

        try:
            min_pod = deployment_properties['minPod']
        except KeyError:
            self.logger.info(
                "minPod not found in deployment %s config\nUsing default minPod = 0" % self.deployment_name)
            min_pod = 0

        current_pods = self.get_deployment_replicas(deployment_name=self.deployment_name, namespace=namespace)

        if not queues:
            self.logger.info(f"All queues are idle")
            if current_pods == min_pod:
                self.logger.info(f"current pods of {self.deployment_name} is min pods")
                return None
            else:
                self.logger.info(f"Scale {self.deployment_name} from {current_pods} to {min_pod}")
                return min_pod

        average_consumer_utilisation = self.scraper.get_queues_average_consumer_utilisation()

        desired_pods = current_pods
        if current_pods < min_pod:
            desired_pods = min_pod
        elif min_pod <= current_pods < max_pod:
            if average_consumer_utilisation < 0.9:
                desired_pods = current_pods + accumulative_limit
        elif current_pods >= max_pod:
            desired_pods = max_pod

        if desired_pods == current_pods == max_pod:
            self.logger.info(f"Current pods of {self.deployment_name} hit max threshold: {max_pod}")
            return None
        elif desired_pods == current_pods < max_pod:
            self.logger.info(f"Current pods of {self.deployment_name} are suitable: {current_pods}")
            return None
        else:
            self.logger.info(f"Scale {self.deployment_name} from {current_pods} to {desired_pods}")
            return desired_pods

    def set_deployment_replicas(self, deployment_name, namespace='default', replicas_number=1):
        """
        :param str deployment_name:
        :param str namespace:
        :param int replicas_number:
        :return: deployment body
        """
        body = self.v1.read_namespaced_deployment_scale(name=deployment_name, namespace=namespace)
        body.spec.replicas = replicas_number
        try:
            api_response = self.v1.patch_namespaced_deployment_scale(name=deployment_name, namespace=namespace,
                                                                     body=body)
            return api_response
        except ApiException as e:
            self.logger.error("Exception when calling AppsV1Api->patch_namespaced_deployment_scale: %s\n" % e)

    def get_deployment_replicas(self, deployment_name, namespace='default'):
        """
        :param str deployment_name:
        :param str namespace:
        :return: deployment replicas
        """
        try:
            body = self.v1.read_namespaced_deployment_scale(name=deployment_name, namespace=namespace)
            return body.status.replicas
        except ApiException as e:
            self.logger.error("Exception when calling AppsV1Api-->read_namespaced_deployment_scale: %s\n" % e)
Example 22
import logging.config
import os

import yaml

from config import ConfigLoader
from sync import Sync
from util import read_file
from worker import Worker

# Load all properties from INI and point to the folder where resources are stored
props_file = "app.ini"
cl = ConfigLoader.load(props_file, 'sync_resource')

# Just for process output
with open("logging.yml") as log_cfg:
    logging.config.dictConfig(yaml.safe_load(log_cfg))

sync_options = cl.get_config('sync')
resources_config = cl.get_resource_config()
log_folder = sync_options.get("log_folder")
os.makedirs(log_folder, exist_ok=True)

# The "base" config into which sync config gets merged
template_config = resources_config.merge_with(sync_options.values)
template_config.set_value("log_folder", os.path.abspath(log_folder))

# Read in the set of example syncs
example_sync_data = read_file("../example_sync/example.yml")

# Execute (single thread for now)
Example 23
    def __init__(self, args):
        # Meh...
        working_dir = args.project_dir
        project_name = args.service
        threads = args.threads
        # /Meh...

        self.args = args
        self.name = project_name
        self.threads = threads
        self.working_dir = os.path.join(working_dir, self.name)
        self.acquisition_dir = os.path.join(self.working_dir, "acquisition")

        if os.path.exists(self.working_dir):
            IO.put("Resuming project in " + self.working_dir, "highlight")
        else:
            os.makedirs(self.working_dir, exist_ok=True)
            IO.put("Initializing project in " + self.working_dir, "highlight")

        self.project_folders["data"] = os.path.join(self.acquisition_dir,
                                                    "data")
        self.project_folders["logs"] = os.path.join(self.working_dir, "logs")
        self.project_folders["metadata"] = os.path.join(
            self.acquisition_dir, "metadata")
        #self.project_folders["trash"] = os.path.join(self.acquisition_dir, "trash")
        #self.project_folders["trash_metadata"] = os.path.join(self.acquisition_dir, "trash_metadata")

        self.config_file = os.path.join(self.working_dir, "config.cfg")

        for f in self.project_folders:
            IO.put("{} path is {}".format(f, self.project_folders[f]))
            if not os.path.exists(self.project_folders[f]):
                IO.put("{} directory not found, creating from scratch.",
                       "warn")
                os.makedirs(self.project_folders[f], exist_ok=True)

        IO.put("Config file is " + self.config_file)

        if not os.path.isfile(self.config_file):
            IO.put("Config file not found, creating default config file",
                   "warn")
            with open(self.config_file, 'w') as f:
                f.write(DefaultConfigs.defaults)

        self.config = ConfigLoader.ConfigLoader()
        self.config.from_file(self.config_file)

        self.transaction_log = os.path.join(self.project_folders["logs"],
                                            "transaction.log")
        self.exception_log = os.path.join(self.project_folders["logs"],
                                          "exception.log")

        self.transaction_logger = logging.getLogger(project_name + "_t")
        self.exception_logger = logging.getLogger(project_name + "_e")

        self.transaction_logger.setLevel(20)
        self.exception_logger.setLevel(20)

        tfh = FileHandler(self.transaction_log)
        efh = FileHandler(self.exception_log)

        fmt = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        fmt.converter = time.gmtime
        tfh.setFormatter(fmt)
        efh.setFormatter(fmt)

        self.transaction_logger.addHandler(tfh)
        self.exception_logger.addHandler(efh)
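
The snippet shows only __init__; assuming the class is named Project, construction might look like this, with argument names mirroring the attributes read above:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--project-dir", dest="project_dir", default=".")
parser.add_argument("--service", default="demo")
parser.add_argument("--threads", type=int, default=4)
project = Project(parser.parse_args())  # "Project" is an assumed name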