if len(sys.argv) == 1:
    parser.print_help()
    sys.exit(1)

module = None
server = None
context = None
targets = []
server_port_dict = {'http': 80, 'https': 443}

args = parser.parse_args()

if args.verbose:
    setup_debug_logger()

logger = CMEAdapter(setup_logger())

if not os.path.exists('data/cme.db'):
    logger.error('Could not find CME database, did you run the setup_database.py script?')
    sys.exit(1)

# clearing the isolation level puts the sqlite3 connection in autocommit mode
db_connection = sqlite3.connect('data/cme.db', check_same_thread=False)
db_connection.text_factory = str
db_connection.isolation_level = None
db = CMEDatabase(db_connection)

if args.cred_id:
    try:
        # look up the stored credential and use it in place of CLI-supplied values
        c_id, credtype, domain, username, password = db.get_credentials(filterTerm=args.cred_id)[0]
        args.username = [username]
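# --- Illustrative aside (not part of the original script) ---
# A minimal, runnable sketch of what isolation_level = None does: the
# sqlite3 driver stops opening implicit transactions, so each statement
# is committed as soon as it executes. The ':memory:' database and the
# 'demo' table below are hypothetical, used only for demonstration.
import sqlite3

_conn = sqlite3.connect(':memory:')
_conn.isolation_level = None  # autocommit: no implicit BEGIN/COMMIT
_conn.execute('CREATE TABLE demo (id INTEGER PRIMARY KEY)')
_conn.execute('INSERT INTO demo DEFAULT VALUES')  # persisted immediately
assert _conn.execute('SELECT COUNT(*) FROM demo').fetchone()[0] == 1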
import core.logger as log
from sklearn.model_selection import KFold, StratifiedKFold
from model.densenet import *
from model.resnet import *
from model.senet import *
from core.mixup import Mixup, OneHotCrossEntropy
from core.snap_scheduler import SnapScheduler
from tqdm import tqdm
from collections import defaultdict
from copy import deepcopy
import argparse
import logging
import gc

logger = logging.getLogger()
logger, RUN_DIR = log.setup_logger(logger)

pretrained_models = {
    'resnet18': resnet18,
    'resnet34': resnet34,
    'resnet50': resnet50,
    'resnet101': resnet101,
    'resnet152': resnet152,
    'densenet121': densenet121,
    'densenet169': densenet169,
    'densenet201': densenet201,
    'densenet161': densenet161,
    'senet18': se_resnet18,
    'senet34': se_resnet34,
    'senet50': se_resnet50,
    'senet101': se_resnet101,
"""Module to help create generic ppol of thread workers. The goal I had in mind was to 2 spawn a fixed number of workers and have them reused for running tasks. Reference: http://code.activestate.com/recipes/576519-thread-pool-with-same-api-as-multiprocessingpool/ """ from collections import namedtuple from multiprocessing import JoinableQueue from multiprocessing import TimeoutError import multiprocessing as mp import queue from core import ProgressPrint from core import logger logger = logger.setup_logger() JobInfo = namedtuple('JobInfo', ['function', 'tag']) class Job(object): def __init__(self, func, args, kwargs): self._func = func self._args = args self._kwargs = kwargs self.out = None def process(self): logger.debug("Running Task for %s", self._args) self.out = self._func(*self._args, **self._kwargs) logger.debug("Finished Task for %s", self._args)
import os
import json
import asyncio

from core.spider import Spider
from core.gdocs import GDocs
from core.logger import setup_logger
from core.logger import main_logger as logger
from core.utils import URLBuilder
from settings import SEEN_FEEDS_PATH

setup_logger()


def main():
    doc = GDocs()
    try:
        doc.connect()
        kws = doc.jobs_keywords()
    except Exception as e:
        logger.critical(
            '"{}" while getting keywords from google docs. Exiting'.format(e),
            exc_info=True)
    else:
        if not kws:
            logger.error('no keywords were found. Nothing to do. Exiting')
            return
        logger.debug('found keywords: "{}"'.format(kws))