def __init__(self, location=None, search_pat=datafile_pat):
    # Set up logger to track progress.
    logs.get_logger(self=self)
    if location is not None:
        self.findData(location, search_pat)
    else:
        self.logger.debug('MNIST Only: No data location specified')
        self.logger.debug('... find datasets with self.findData')
def __init__(self, mainloop, audio_buffer, app):
    self.__mainloop = mainloop
    self.__audio_buffer = audio_buffer
    self.__app = app
    self.__logger = get_logger()
    # Matches a leading "HH:MM:SS.mmm W/I/E " timestamp-and-severity prefix.
    # Raw strings avoid the invalid escape sequence warnings of '\.' and '\s'.
    self.__log_regex = re.compile(r'[0-9]{2}:[0-9]{2}:[0-9]{2}'
                                  r'\.[0-9]{3}\s(W|I|E)\s')
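# A minimal, self-contained check of what the pattern above captures; the
# sample log line is hypothetical.
import re

log_regex = re.compile(r'[0-9]{2}:[0-9]{2}:[0-9]{2}'
                       r'\.[0-9]{3}\s(W|I|E)\s')
line = '12:34:56.789 W ap_connection: connection dropped'
match = log_regex.match(line)
if match:
    severity = match.group(1)     # 'W'
    message = line[match.end():]  # text after the timestamp prefix
    print(severity, message)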
def __init__(self, model: nn.Module, optimizer: optim.Optimizer,
             scheduler: SchedulerType, dataset: str, batch_size: int,
             num_workers: int, data_path: Path, log_path: Path,
             checkpoint_path: Path):
    self.logger = get_logger(name=__name__, save_dir=str(log_path / 'logs'))
    self.logger.info('Initializing Classification Model Trainer.')

    if dataset.upper() == 'CIFAR10':
        train_loader, eval_loader = get_cifar10_loaders(
            data_root=str(data_path), batch_size=batch_size,
            num_workers=num_workers, augment=True)
    else:
        raise NotImplementedError('Only CIFAR10 implemented.')

    self.model = model  # Assumes model has already been sent to device.
    self.optimizer = optimizer  # Assumes optimizer is associated with model.
    # Finds device of model, assuming it is on a single device.
    self.device = get_single_model_device(model)
    self.loss_func = nn.CrossEntropyLoss()
    self.writer = SummaryWriter(str(log_path))
    self.manager = CheckpointManager(model, optimizer, checkpoint_path,
                                     mode='max', save_best_only=True,
                                     max_to_keep=1)
    self.scheduler = scheduler  # No learning rate scheduling if scheduler is None.
    self.train_loader = train_loader
    self.eval_loader = eval_loader
    self.epoch = 0
    self.tic = 0
    self.tic_tic = time()
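# Usage sketch. The trainer class name 'ClassificationTrainer' is assumed
# from the log message above, and the CUDA device is an assumption; model
# and optimizer construction happen outside this fragment.
import torchvision
from pathlib import Path
from torch import optim

model = torchvision.models.resnet18(num_classes=10).to('cuda')
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)

trainer = ClassificationTrainer(
    model=model, optimizer=optimizer, scheduler=scheduler,
    dataset='CIFAR10', batch_size=128, num_workers=4,
    data_path=Path('./data'), log_path=Path('./runs'),
    checkpoint_path=Path('./checkpoints'))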
def log():
    logger = get_logger('monitor')
    rdict = request.form  # Dictionary with keys as logging.LogRecord attributes.
    name = rdict.get('name', '')
    message = rdict.get('message', rdict.get('msg', 'NO_MESSAGE'))
    ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
    loglevel = int(rdict.get('levelno', logging.ERROR))
    logger.log(loglevel, 'From: %s %s, %s', ip, name, message)
    return 'OK'
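# Client-side sketch of how records could reach the endpoint above: the
# stdlib HTTPHandler posts a LogRecord's attributes ('name', 'msg',
# 'levelno', ...) as urlencoded form data, which is exactly what
# request.form receives. Host and URL path here are hypothetical.
import logging
import logging.handlers

handler = logging.handlers.HTTPHandler('localhost:5000', '/log',
                                       method='POST')
client_logger = logging.getLogger('sensor-node')
client_logger.addHandler(handler)
client_logger.error('disk usage critical')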
def run(controller_name: str, ev_halt: th.Event):
    # Get the local controller and logger here.
    settings = get_settings(args, section=controller_name)
    settings['controller_name'] = controller_name
    # Create a separate logger for each controller with its own name.
    logger = get_logger(controller_name)
    logger = make_logger(logger=logger, **settings)
    logger.debug(controller_name + '\n' +
                 pformat({s: v for s, v in settings.items()
                          if s not in ('username', 'password')}))
    # Dynamically import controller functions from the submodule.
    logger.debug('CTRL MODULE: %s', 'controllers.%s' % settings['import_path'])
    ctrl_module = importlib.import_module(
        'controllers.%s' % settings['import_path'])
    get_controller = getattr(ctrl_module, 'get_controller')
    get_current_state = getattr(ctrl_module, 'get_current_state')
    update_controller = getattr(ctrl_module, 'update_controller')
    ctrl = get_controller(**settings)

    while not ev_halt.is_set():  # is_set() replaces the deprecated isSet().
        try:
            start = datetime.now(pytz.utc)
            settings = get_settings(args, section=controller_name,
                                    write_settings=True)
            update_controller(ctrl, **settings)
            prev_end = start - 2 * timedelta(seconds=int(settings['interval']))
            if settings['no_network']:
                state = None
            else:
                state = get_current_state(prev_end, start, **settings)
            if state is not None:
                logger.debug('State\n{}'.format(state))
                action, *diag = ctrl.predict(state)
                feedback = diag[0] if len(diag) > 0 else -1
                logger.info('Last feedback: {:.2f} \tSetpoint: {:.2f}'.format(
                    feedback, action[0]))
                put_control_action(action, **settings)
            if settings['dry_run']:
                logger.info('Dry run finished. Halting.')
                ev_halt.set()
            else:
                time_taken = datetime.now(
                    pytz.utc).timestamp() - start.timestamp()
                time_left = float(settings['interval']) - time_taken
                logger.info('Waiting for {:.1f}s'.format(time_left))
                ev_halt.wait(time_left)
        except KeyboardInterrupt:
            logger.info('Keyboard interrupt 1. Halting.')
            ev_halt.set()
        except Exception as exc:
            logger.error(msg=exc, exc_info=True)
            if settings.get('dry_run', False):  # dry_run defaults to False.
                ev_halt.set()
            else:
                ev_halt.wait(float(settings['interval']) -
                             (datetime.now(pytz.utc).timestamp() -
                              start.timestamp()))
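# run() resolves three functions by name from a controllers submodule. A
# minimal sketch of what such a module is assumed to export; only the three
# names come from the getattr calls above, everything else is illustrative.

# controllers/example.py (hypothetical)
class ExampleController:
    def __init__(self, **settings):
        self.setpoint = float(settings.get('stepsize', 1.0))

    def predict(self, state):
        # Returns (action, *diagnostics); run() treats diag[0] as feedback.
        action = [self.setpoint]
        feedback = 0.0
        return action, feedback


def get_controller(**settings):
    return ExampleController(**settings)


def get_current_state(prev_end, start, **settings):
    # Would normally query measurements between prev_end and start.
    return None


def update_controller(ctrl, **settings):
    ctrl.setpoint = float(settings.get('stepsize', ctrl.setpoint))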
def __init__(self, conf_file, transport, queue):
    """Thread that reads from a Queue and sends data to a remote server."""
    super(ThreadApi, self).__init__()
    self.port = transport
    self.running = False
    self.l = logs.get_logger('Api', conf_file=conf_file)
    self.queue = queue
def __init__(self, conf_file, transport, queue):
    """Thread that reads from the serial port and saves data to a Queue."""
    super(ThreadSerial, self).__init__()
    self.port = transport
    self.protocol = MCUComm()
    self.running = False
    self.l = logs.get_logger('Serial', conf_file=conf_file)
    self.queue = queue
    self.lr = logs.LogReader(conf_file)
def __init__(self, conf_file, transport, queues):
    """Thread that reads from the serial port and saves data to Queues."""
    super(ThreadSerial, self).__init__()
    self.port = transport
    self.protocol = MCUComm()
    self.running = False
    self.l = logs.get_logger('Serial', conf_file=conf_file)
    self.queues = queues
    self.lr = logs.LogReader(conf_file)
def __init__(self, conf_file, transport, pivi_id, queue):
    """Thread that reads from a Queue and sends data to a remote server."""
    super(ThreadUdp, self).__init__()
    self.protocol = ServerComm(pivi_id=pivi_id)
    self.port = transport
    self.mac = self.protocol.less_mac
    self.running = False
    self.l = logs.get_logger('Udp', conf_file=conf_file)
    self.queue = queue
    self.lr = logs.LogReader(conf_file)
def __init__(self, teacher: nn.Module, student: nn.Module,
             optimizer: optim.Optimizer, scheduler: SchedulerType,
             dataset: str, batch_size: int, num_workers: int,
             distill_ratio: float, temperature: float, data_path: Path,
             log_path: Path, checkpoint_path: Path):
    self.logger = get_logger(name=__name__, save_dir=str(log_path / 'logs'))
    self.logger.info('Initializing Knowledge Distillation Model Trainer.')

    assert get_single_model_device(teacher) == get_single_model_device(student), \
        'Teacher and student are expected to be on the same single device.'

    if dataset.upper() == 'CIFAR10':
        train_loader, eval_loader = get_cifar10_loaders(
            data_root=str(data_path), batch_size=batch_size,
            num_workers=num_workers, augment=True)
    else:
        raise NotImplementedError('Only CIFAR10 implemented.')

    self.teacher = teacher
    self.student = student
    self.optimizer = optimizer
    # Gets device of module if on a single device.
    self.device = get_single_model_device(student)
    self.loss_func = KnowledgeDistillationLoss(distill_ratio=distill_ratio,
                                               temperature=temperature)
    self.writer = SummaryWriter(str(log_path))
    self.manager = CheckpointManager(student, optimizer, checkpoint_path,
                                     mode='max', save_best_only=True,
                                     max_to_keep=1)
    self.scheduler = scheduler
    self.train_loader = train_loader
    self.eval_loader = eval_loader
    self.epoch = 0
    self.tic = 0
    self.tic_tic = time()
    # Set teacher to evaluation mode for more consistent output.
    self.teacher.eval()
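# A sketch of what KnowledgeDistillationLoss might compute, following the
# common Hinton et al. (2015) formulation. This is an assumption about the
# class named above, not its actual implementation: a temperature-scaled KL
# term on soft targets blended with hard-label cross entropy.
import torch.nn as nn
import torch.nn.functional as F


class KnowledgeDistillationLoss(nn.Module):
    def __init__(self, distill_ratio: float, temperature: float):
        super().__init__()
        self.distill_ratio = distill_ratio
        self.temperature = temperature

    def forward(self, student_logits, teacher_logits, targets):
        t = self.temperature
        # KL divergence between softened distributions, scaled by T^2 so
        # gradients keep a comparable magnitude across temperatures.
        soft = F.kl_div(F.log_softmax(student_logits / t, dim=1),
                        F.softmax(teacher_logits / t, dim=1),
                        reduction='batchmean') * (t * t)
        hard = F.cross_entropy(student_logits, targets)
        return self.distill_ratio * soft + (1 - self.distill_ratio) * hard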
from argparse import Namespace

import colorama

from utils.logs import get_logger

__all__ = ["refresh", "show", "cleanup", "disambiguate"]

logger = get_logger("csn.cli")


def refresh(args: Namespace):
    import difflib
    import operator
    from datetime import datetime
    from itertools import groupby

    from utils import paths
    from utils.caching import detect_protocol, load_cache
    from utils.wiki import coppermind_query, extract_relevant_info

    if args.dataset.lower() == "list":
        print(" characters  refreshes the character data from coppermind.net")
    else:
        if args.dataset.lower() in ("coppermind", "characters", "wiki"):
            # Remove cache and re-download data.
            logger.info("Refreshing coppermind.net character data.")
            # cache already exists
)
from utils.logs import get_logger
from utils.paths import disambiguation_dir, gml_dir, json_dir
from utils.simpletypes import CharacterOccurrence

__all__ = [
    "book_graph",
    "series_graph",
    "discrete_book_graph",
    "discrete_series_graph",
    "cosmere_graph",
    "save_network_gml",
    "save_network_json",
]

logger = get_logger("csn.networks.interactions")

RUN_SIZE = InteractionNetworkConfig.run_size

nodes = {c.id: c.properties for c in characters}
logger.debug("Created dictionary of nodes.")


def save_disambiguation(book: str, disambiguation: dict):
    with (disambiguation_dir / book).with_suffix(".yml").open(mode="w") as f:
        yaml.dump(disambiguation, f, yaml.Dumper,
                  default_flow_style=False, sort_keys=False)
def gui_main(addon_dir):
    # Initialize app var storage
    app = Application()
    logout_event = Event()
    connstate_event = Event()
    info_value_manager = InfoValueManager()
    app.set_var('logout_event', logout_event)
    app.set_var('login_last_error', ErrorType.Ok)
    app.set_var('connstate_event', connstate_event)
    app.set_var('exit_requested', False)
    app.set_var('info_value_manager', info_value_manager)

    # Check needed directories first
    data_dir, cache_dir, settings_dir = check_dirs()

    # Instantiate the settings obj
    settings_obj = SettingsManager()

    # Show legal warning
    show_legal_warning(settings_obj)

    # Start checking the version
    check_addon_version(settings_obj)

    # Don't set cache folder if it's disabled
    if not settings_obj.get_cache_status():
        cache_dir = ''

    # Initialize spotify stuff
    ml = MainLoop()
    buf = BufferManager(get_audio_buffer_size())
    callbacks = SpotimcCallbacks(ml, buf, app)
    sess = Session(
        callbacks,
        app_key=appkey,
        user_agent="python ctypes bindings",
        settings_location=settings_dir,
        cache_location=cache_dir,
        initially_unload_playlists=False,
    )

    # Now that we have a session, set settings
    set_settings(settings_obj, sess)

    # Initialize libspotify's main loop handler on a separate thread
    ml_runner = MainLoopRunner(ml, sess)
    ml_runner.start()

    # Stay in the application loop until exit is requested
    while not app.get_var('exit_requested'):
        # Set the exit flag if login was cancelled
        if not do_login(sess, addon_dir, "DefaultSkin", app):
            app.set_var('exit_requested', True)
        # Otherwise block until the connection state is sane, and continue
        elif wait_for_connstate(sess, app, ConnectionState.LoggedIn):
            proxy_runner = ProxyRunner(sess, buf, host='127.0.0.1',
                                       allow_ranges=True)
            proxy_runner.start()
            log_str = 'starting proxy at port {0}'.format(
                proxy_runner.get_port())
            get_logger().info(log_str)

            # Instantiate the playlist manager
            playlist_manager = playback.PlaylistManager(proxy_runner)
            app.set_var('playlist_manager', playlist_manager)

            # Set the track preloader callback
            preloader_cb = get_preloader_callback(sess, playlist_manager, buf)
            proxy_runner.set_stream_end_callback(preloader_cb)

            hide_busy_dialog()
            mainwin = windows.MainWindow("main-window.xml", addon_dir,
                                         "DefaultSkin")
            mainwin.initialize(sess, proxy_runner, playlist_manager, app)
            app.set_var('main_window', mainwin)
            mainwin.doModal()
            show_busy_dialog()

            # Playback and proxy deinit sequence
            proxy_runner.clear_stream_end_callback()
            playlist_manager.stop()
            proxy_runner.stop()
            buf.cleanup()

            # Join all the running tasks
            tm = TaskManager()
            tm.cancel_all()

            # Clear some vars and collect garbage
            proxy_runner = None
            preloader_cb = None
            playlist_manager = None
            mainwin = None
            app.remove_var('main_window')
            app.remove_var('playlist_manager')
            gc.collect()

    # Logout
    if sess.user() is not None:
        sess.logout()
        logout_event.wait(10)

    # Stop main loop
    ml_runner.stop()

    # Some deinitializations
    info_value_manager.deinit()
import networkx as nx

from core.characters import characters
from core.config import FamilyNetworkConfig
from utils.logs import get_logger
from utils.paths import gml_dir, json_dir

__all__ = [
    "create_graph",
    "extract_network_scopes",
    "save_network_gml",
    "save_network_json",
]

logger = get_logger("csn.networks.family")


def create_graph() -> nx.OrderedGraph:
    # Define relevant fields for analysis.
    FIELDS = FamilyNetworkConfig.relation_fields

    # Create graph.
    G = nx.OrderedGraph()

    # Narrow down the selection of characters.
    relevant_chars = set(c for c in characters
                         if any(f in c.relatives for f in FIELDS))

    # Restructure the character list into efficient data structures to
    # reduce complexity.
    names = {c.name: c for c in relevant_chars}
def get_settings(parsed_args: Namespace, section: str = 'DEFAULT',
                 write_settings=False) -> dict:
    # Combines command line flags with settings parsed from the settings ini
    # file. The command line takes precedence: values set on the command line
    # are not overwritten by the ini file.
    # `settings` is a dictionary built from the command line args, the ini
    # DEFAULT section, and the ini section named by the `section` argument.
    # Values are not limited to strings but are converted from the raw ini
    # string values.
    settings = {}
    # Try reading the ini file; on error, return previous settings.
    cfg = ConfigParser(allow_no_value=True)
    if parsed_args.settings is None:
        raise ValueError('No settings file provided.')
    cfg.read(parsed_args.settings)

    # Read DEFAULT settings, then the other section, if provided.
    sections = ('DEFAULT', ) if (section == 'DEFAULT' or section not in cfg) \
        else ('DEFAULT', section)
    for (setting, value) in itertools.chain.from_iterable(
            [cfg[sec].items() for sec in sections]):
        # Only update settings which were not specified on the command line,
        # and which had non-empty values.
        # if (setting not in settings) or (settings.get(setting) is None):
        # Float conversion
        if setting in ('stepsize', 'window', 'interval', 'tolerance'):
            settings[setting] = float(value)
        # Integer conversion
        elif setting in ('logs_email_batchsize', 'port'):
            settings[setting] = int(value)
        # Array of float bounds parsed from a comma-separated pair
        elif setting == 'bounds':
            settings[setting] = np.asarray(
                [tuple(map(float, value.split(',')))])
        # Case-insensitive string, normalized to lowercase
        elif setting == 'target':
            settings[setting] = value.lower()
        # List of strings
        elif setting == 'controllers':
            settings[setting] = value.split(',')
        # String
        else:
            settings[setting] = value

    # Override with settings specified on the command line (exclude None values).
    cmdline_args = {
        setting: value
        for setting, value in vars(parsed_args).items() if value is not None
    }
    settings.update(cmdline_args)

    # Add default settings if they did not have a value in the ini file or on
    # the command line. These are settings that must be set in any case.
    for setting in DEFAULTS:
        if settings.get(setting) is None:
            settings[setting] = DEFAULTS[setting]

    if settings.get('output_settings') not in ('', None) and write_settings:
        try:
            with open(settings['output_settings'], 'w', newline='') as f:
                # Only these settings are written to the output settings csv.
                keys = ['interval', 'stepsize', 'target', 'window', 'bounds']
                writer = csv.DictWriter(f, fieldnames=keys)
                writer.writeheader()
                writer.writerow({k: settings.get(k, '') for k in keys})
        except Exception as exc:
            get_logger().error(msg=exc, exc_info=True)

    if settings.get('username') in (None, '') or \
            settings.get('password') in (None, ''):
        settings['username'], settings['password'] = get_credentials(
            filename=DEFAULTS['credentialfile'])
    return settings
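# A runnable sketch of the DEFAULT-then-section precedence the function
# iterates over; the ini content and section name are hypothetical.
from configparser import ConfigParser

cfg = ConfigParser(allow_no_value=True)
cfg.read_string("""
[DEFAULT]
interval = 60
target = Temperature

[ZONE1]
interval = 30
""")

merged = {}
for sec in ('DEFAULT', 'ZONE1'):
    merged.update(cfg[sec].items())

print(float(merged['interval']))  # 30.0 -- the section overrides DEFAULT
print(merged['target'].lower())   # 'temperature' -- normalized like 'target'
# Command line flags, applied afterwards via settings.update(), win over both.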
from training.losses import getLoss
from training import losses
from tensorboardX import SummaryWriter
from utils.logs import get_logger  # Assumed source of get_logger, as elsewhere.

os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
cv2.ocl.setUseOpenCL(False)
cv2.setNumThreads(0)

import numpy as np
import albumentations as A
from albumentations.pytorch import ToTensor

logger = get_logger('Train', 'INFO')
logger.info('Load args')

parser = argparse.ArgumentParser("PyTorch Xview Pipeline")
arg = parser.add_argument
arg('--config', metavar='CONFIG_FILE', help='path to configuration file')
arg('--workers', type=int, default=6, help='number of cpu threads to use')
arg('--device', type=str,
    default='cpu' if platform.system() == 'Darwin' else 'cuda',
    help='device for model - cpu/gpu')
arg('--gpu', type=str, default='0',
    help='list of GPUs for parallel training, e.g. 0,1,2,3')
arg('--output-dir', type=str, default='weights/')
class UClient:
    """API client for the micro service.

    The client is mostly adapted to suit the needs of uworker.
    """
    logger = get_logger("UClient", to_file=False, to_stdout=True)

    def __init__(self, apiroot, username=None, password=None,
                 credentials_file=None, verbose=False, retries=200,
                 time_between_retries=None):
        """Init the api client.

        Args:
            apiroot (str): Root url to the micro service.
            username (str): Micro service username.
            password (str): Micro service password.
            credentials_file (str): Path to a file that contains the micro
                service credentials. This is an alternative to passing
                username/password as arguments.
            verbose (bool): If True, print api responses.
            retries (int): Retry the requests to the micro service at most
                this many times.
            time_between_retries (int): Number of seconds between the retry
                requests.
        """
        self.uri = apiroot.strip('/')
        self.verbose = verbose
        self.credentials = self._get_credentials(username, password,
                                                 credentials_file)
        self.token = None
        self.retries = retries
        if time_between_retries is None:
            # Exponential backoff (3^n seconds), capped at 300 s.
            self.time_between_retries = [
                min(pow(3, v), 300) for v in range(retries)
            ]
        else:
            self.time_between_retries = [time_between_retries] * retries

    def get_project_uri(self, project):
        if not validate_project_name(project):
            raise UClientError('Unsupported project name')
        return self.uri + '/v4/{}'.format(project)

    def renew_token(self):
        """Renew the token for token based authorization."""
        url = self.uri + "/token"
        auth = (self.credentials['username'], self.credentials['password'])
        r = self._call_api(url, renew_token=False, auth=auth)
        self.token = r.json()['token']

    def _load_credentials(self, filename="credentials.json"):
        """Load credentials from a credentials file. Not very secure."""
        with open(filename) as fp:
            credentials = json.load(fp)
        if self.verbose:
            print("loaded credentials from '{}'".format(filename))
        return credentials

    def _get_credentials(self, username, password, credentials_file):
        """Get credentials from arguments or file.

        If both a file and a user have been supplied, use the manually
        entered user and password.
        """
        if username is not None:
            return {"username": username, "password": password}
        elif credentials_file is not None:
            return self._load_credentials(credentials_file)
        else:
            return None

    def get_job_list(self, project):
        """Request the list of jobs from the server."""
        return self._call_api(self.get_project_uri(project) + "/jobs")

    def fetch_job(self, job_type=None, project=None):
        """Request an unprocessed job from the server."""
        if project:
            url = self.get_project_uri(project) + "/jobs/fetch"
        else:
            url = self.uri + '/v4/projects/jobs/fetch'
        if job_type:
            url += '?{}'.format(urllib.parse.urlencode({'type': job_type}))
        try:
            return self._call_api(url)
        except UClientError as err:
            if err.status_code != 404:
                raise

    def claim_job(self, url, worker_name):
        """Claim a job from the server."""
        # TODO: Worker node info
        return self._call_api(url, 'PUT', json={"Worker": worker_name})

    def update_output(self, url, output):
        """Update the output of a job."""
        return self._call_api(url, 'PUT', json={'Output': output},
                              headers={'Content-Type': "application/json"})

    def update_status(self, url, status, processing_time=None):
        """Update the status of a job."""
        data = {'Status': status, 'ProcessingTime': processing_time}
        return self._call_api(url, 'PUT', json=data,
                              headers={'Content-Type': "application/json"})

    def _call_api(self, url, method='GET', renew_token=True, auth=None,
                  **kwargs):
        """Call the micro service.

        Returns:
            r (requests.Response): The api response.

        Raises:
            UClientError: When the api call fails.
""" if auth is None: auth = self.auth response = None error = None is_retry = False retries = self.retries for attempt in range(retries + 1): if is_retry: sleep(self.time_between_retries[attempt - 1]) try: response = getattr(requests, method.lower())(url, auth=auth, **kwargs) break except Exception as err: self.logger.warning( "Request to {0} raised {3} (attempt {1}/{2})".format( url, attempt + 1, retries + 1, err)) is_retry = True error = err if response is None: raise UClientError('API call to {} failed: {}'.format(url, error)) if self.verbose: print(response.text) if renew_token and response.status_code == 401: self.renew_token() return self._call_api(url, method=method, renew_token=False, **kwargs) if response.status_code > 299: raise UClientError(response.reason, response.status_code) return response @property def auth(self): if not self.credentials: raise UClientError('No credentials provided') if not self.token: self.renew_token() return (self.token, '')
                        help='Location of settings file.',
                        default=DEFAULTS['settings'])
    parser.add_argument('-l', '--logs', type=str, required=False,
                        default=None,
                        help='Location of file to write logs to.')
    parser.add_argument('-p', '--port', type=int, required=False,
                        default=None,
                        help='Port number of server to listen on.')
    parser.add_argument('--host', type=str, required=False, default=None,
                        help='Host address of server to listen on, '
                             'e.g. 0.0.0.0')
    parser.add_argument('-v', '--verbosity', type=str, required=False,
                        default=None, help='Verbosity level.',
                        choices=('CRITICAL', 'ERROR', 'WARNING', 'INFO',
                                 'DEBUG'))
    parser.add_argument('-m', '--message', type=str, required=False,
                        default=None,
                        help='Compose a test INFO message to send to a '
                             'running monitor.')
    return parser


if __name__ == '__main__':
    parser = make_arguments()
    args = parser.parse_args()
    if args.message is not None:
        settings = get_settings(args, section='DEFAULT')
        logger = get_logger('monitor')
        logger = make_logger(enable=('stream', 'file', 'email', 'http'),
                             logger=logger, **settings)
        logger.info(args.message)
    else:
        settings = get_settings(args, section='MONITOR')
        logger = get_logger('monitor')
        logger = make_logger(enable=('stream', 'file'), logger=logger,
                             **settings)
        logger.info('Started monitoring on %s:%s ...' %
                    (settings['host'], settings['port']))
        dapp.run_server(host=settings['host'], port=settings['port'],
                        debug=True)
from pathlib import Path

import cv2
from turbojpeg import TurboJPEG
# import gdcm
import zipfile
from io import StringIO
# conda install gdcm -c conda-forge
import multiprocessing
from concurrent.futures import ThreadPoolExecutor

PATH = '/Users/dhanley/Documents/rsnastr' \
    if platform.system() == 'Darwin' else '/data/rsnastr'
os.chdir(PATH)
from utils.logs import get_logger

logger = get_logger('Create folds', 'INFO')

TYPE = 'ip'  # 'bsb'
BASE_PATH = f'{PATH}/data'
JPEG_PATH = f'{PATH}/data/jpeg{TYPE}'

try:
    jpeg = TurboJPEG()
except Exception:  # Narrowed from a bare except.
    logger.info('Failed to load turbojpeg')

logger.info(f'Home path list \n{os.listdir(BASE_PATH)}')


def turbodump(f, img):
    # Encode the BGR array to JPEG with default settings and write it out;
    # the with-block ensures the file handle is closed.
    with open(f, 'wb') as out_file:
        out_file.write(jpeg.encode(img[:, :, ::-1]))
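# A hypothetical batch conversion built on the ThreadPoolExecutor imported
# above; `frames` is assumed to be an iterable of (path, BGR ndarray) pairs
# prepared elsewhere in the script.
def turbodump_all(frames, workers=multiprocessing.cpu_count()):
    with ThreadPoolExecutor(max_workers=workers) as pool:
        futures = [pool.submit(turbodump, path, img) for path, img in frames]
        for future in futures:
            future.result()  # Re-raise any encoding errors.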
import mwparserfromhell as mwp
from mwparserfromhell.nodes.template import Template
from mwparserfromhell.nodes.wikilink import Wikilink

from .config import WikiConfig
from utils.constants import books, demonyms, nations, species, titles
from utils.datastructures import CharacterLookup
from utils.logs import get_logger
from utils.regex import possession, punctuation_infix
from utils.wiki import coppermind_query, extract_relevant_info

__all__ = ["Character", "characters", "lookup"]

logger = get_logger("csn.core.characters")


class Character:
    """Representation of a character in the Cosmere."""

    def __init__(self, query_result: dict):
        """Construct a character from coppermind.net api query results."""
        page = extract_relevant_info(query_result)
        self._keep: bool = True
        self._pageid: int = page["pageid"]
        infobox = self._parse_infobox(page)
        self.name: str = page["title"]
        self.common_name: str = infobox.pop("common_name", "")
PATH = '/Users/dhanley/Documents/rsnastr' \
    if platform.system() == 'Darwin' else '/data/rsnastr'
os.chdir(PATH)
from utils.logs import get_logger

pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 1000)
pd.set_option('display.width', 1000)

parser = argparse.ArgumentParser()
arg = parser.add_argument
arg("--subfile", default='sub/submission.csv', type=str)
arg("--testfile", default='data/test.csv.zip', type=str)
args = parser.parse_args()

logger = get_logger('LSTM', 'INFO')
sub = pd.read_csv(args.subfile)
test = pd.read_csv(args.testfile)
logger.info(f'Submission shape : {sub.shape}')
logger.info(f'Test shape : {test.shape}')


def clean_sub(sub, test):
    # Create the necessary metadata.
    subtmp = sub.copy()
    subtmp['iid'] = subtmp.id.str.split('_').str[0]
    subtmp['Study'] = subtmp.id.str.split('_').str[0]
    subtmp = subtmp.merge(test[['StudyInstanceUID', 'SOPInstanceUID']],
                          left_on='Study', right_on='SOPInstanceUID',
def __init__(self):
    self.logger = get_logger('bot')
    self.evernote = Evernote(title_prefix='[TELEGRAM BOT]')
    self.telegram = BotApi(config['telegram']['token'])
def setUp(self):
    self.log = logs.get_logger('unittest', to_file=False, to_stdout=True)
from viberbot import Api
from viberbot.api.bot_configuration import BotConfiguration
from viberbot.api.viber_requests import (ViberConversationStartedRequest,
                                         ViberFailedRequest,
                                         ViberMessageRequest,
                                         ViberSubscribedRequest)

from utils import logs
import processing

PORT = int(getenv('PORT', 80))
TOKEN = getenv('TOKEN')
WEBHOOK_URL = getenv('WEBHOOK_URL')

# Configure logging
logger = logs.get_logger('bot-main')

app = Flask(__name__)
viber = Api(BotConfiguration(name='VyatSU Bot', avatar='', auth_token=TOKEN))


@app.route('/', methods=['POST'])
def incoming():
    viber_request = viber.parse_request(request.get_data(as_text=True))
    if isinstance(viber_request, ViberMessageRequest):
        processing.process_message_request(viber_request, viber)
    elif isinstance(viber_request, ViberConversationStartedRequest):
        processing.process_conversation_started_request(viber_request, viber)
    elif isinstance(viber_request, ViberSubscribedRequest):
        processing.process_subscribe_request(viber_request, viber)
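# Hypothetical entry point, not shown in the fragment above: register the
# Viber webhook once the server is publicly reachable, then serve Flask.
if __name__ == '__main__':
    viber.set_webhook(WEBHOOK_URL)
    app.run(host='0.0.0.0', port=PORT)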
def __init__(self, bot_token, download_dir=None, *, loop=None):
    logger = get_logger('downloader')
    super().__init__(download_dir, logger=logger, loop=loop)
    self.telegram_api = BotApi(bot_token)
    if os.path.exists(template_dir):
        info['template_path'] = template_dir
    return info


modules = [
    'admin',
    'bot',
]
loaded_modules = [get_module_info(m) for m in modules]

middlewares = []
for m in loaded_modules:
    if m.get('middlewares'):
        middlewares.extend(m['middlewares'])

app = aiohttp.web.Application(middlewares=middlewares)

template_path_list = []
for module_info in loaded_modules:
    if module_info.get('template_path'):
        template_path_list.append(module_info['template_path'])
    if module_info.get('urls'):
        for url_scheme in module_info['urls']:
            app.router.add_route(*url_scheme)

aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(template_path_list))
app.logger = get_logger('bot')

bot = EvernoteBot(config['telegram']['token'], 'evernoterobot')
bot.config = config  # FIXME:
app.bot = bot
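# Hypothetical entry point; the fragment stops before the server starts,
# but an aiohttp application assembled this way is typically served with:
if __name__ == '__main__':
    aiohttp.web.run_app(app, host='127.0.0.1', port=8080)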
logger = make_logger(**default_settings)
logger.info('Starting script: %s %s' % (sys.executable, ' '.join(sys.argv)))

threads = []
ev_halt = th.Event()
for controller_name in default_settings['controllers']:
    thread = th.Thread(target=run, daemon=False,
                       kwargs=dict(controller_name=controller_name,
                                   ev_halt=ev_halt))
    thread.start()
    threads.append(thread)  # Track the thread so it can be joined below.
    logger.info('%s thread started.' % controller_name)

# Wait for threads to finish, or interrupt them in case of error/input.
try:
    for thread in threads:
        thread.join()
except KeyboardInterrupt:
    logger.info('Keyboard interrupt 0. Halting.')
    ev_halt.set()
    for thread in threads:
        thread.join(timeout=2.)
# Exceptions during settings parsing or thread creation.
except Exception as exc:
    logger = get_logger()
    logger.exception(msg=exc, exc_info=True)
    logger.critical(msg='Could not start script.')
    exit(-1)