Example #1
def main():
    configuration.load_components_info()
    configuration.load_default_config()

    import cliargparse
    cliargs = cliargparse.parse_cli_args()

    # This must be done *before* checking for existing processes, because a
    # configuration file may not have been set in the command arguments
    configfile = configuration.set_configuration_file(cliargs)
    configuration.set_update_only(cliargs)

    if dbus:
        check_existing_processes(configfile)

    configuration.load_configuration()

    import logger
    logger.set_logger(cliargs)

    # Make sure the main thread has a known name
    threading.current_thread().setName(configuration.MAIN_THREAD_NAME)
    sys.excepthook = handle_uncaught_exception
    # The application must also crash if an exception is raised in a secondary
    # thread: if that thread has locked the databases, it won't release them
    # otherwise. They could be released in a finally clause, but that would
    # leave the databases in an unknown state, which is even worse.
    install_thread_excepthook()
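
install_thread_excepthook() is defined elsewhere in that project; the comment above explains why it is needed. Purely as an illustration, a minimal sketch of the usual recipe (wrapping threading.Thread.run so uncaught exceptions in worker threads reach sys.excepthook; on Python 3.8+ the built-in threading.excepthook covers the same need) could look like:

import sys
import threading

def install_thread_excepthook():
    # Hypothetical sketch, not the project's actual helper: route uncaught
    # exceptions from secondary threads through sys.excepthook.
    run_original = threading.Thread.run

    def run_with_excepthook(self, *args, **kwargs):
        try:
            run_original(self, *args, **kwargs)
        except Exception:
            sys.excepthook(*sys.exc_info())

    threading.Thread.run = run_with_excepthook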
Example #2
File: bot.py Project: zhukov000/xProject
def init():
    """
        Activate object for working: __config__, __db_data__
    """
    __config__.read('config.ini')
    set_logger(
        'xProject.log',
        mode=__config__['logger']['mode'],
        level=__config__['logger']['level']
    )
    load_db()
    update_db(int(__config__['db_csv']['timeout']))
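
In all of these examples set_logger is a project-specific helper, not a standard-library function, and its signature varies from project to project. Purely as a sketch of what such a helper typically does (the filename/mode/level parameters mirror this example; everything else is an assumption):

import logging

def set_logger(filename, mode='a', level='INFO'):
    # Hypothetical set_logger: attach a file handler to the root logger and return it.
    logger = logging.getLogger()
    logger.setLevel(getattr(logging, str(level).upper(), logging.INFO))
    handler = logging.FileHandler(filename, mode=mode)
    handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s'))
    logger.addHandler(handler)
    return logger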
Example #3
def main(host, db_type, port, file_name, mysql_host, mysql_port,
         db, user, password, target_url, interval, log_level):
    set_logger(getattr(logging, log_level))
    proxy_list = load_file(file_name)
    if db_type == "sqlite":
        queue = SqliteQueue()
    else:
        queue = MysqlQueue(mysql_host, mysql_port, db, user, password)
    checker = ProxyCheck(proxy_list, queue, target_url)
    relay_handler = HttpRelayHandler(queue, (host, port))
    scheduler = Manager(checker, queue, relay_handler, interval)
    scheduler.run()
Example #4
File: main.py Project: ytkn/scheduling
def main():
    logger.set_logger()
    from wsgiref import simple_server
    app = falcon.API(middleware=[CORSMiddleware()])

    app.add_route("/instances", InstanceListResource())
    app.add_route("/instances/{file}", InstanceResource())
    app.add_route(
        "/instances/{file}/solutions", SolutionListResource())
    app.add_route(
        "/instances/{file}/solutions/{solution_name}", SolutionResource())
    httpd = simple_server.make_server("127.0.0.1", 8000, app)
    httpd.serve_forever()
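
CORSMiddleware is defined elsewhere in the ytkn/scheduling project. A minimal, permissive falcon middleware of that shape (an assumption, not the project's actual class) might be:

class CORSMiddleware:
    # Hypothetical permissive CORS middleware for falcon.
    def process_response(self, req, resp, resource, req_succeeded):
        # Allow any origin; real deployments should restrict this.
        resp.set_header('Access-Control-Allow-Origin', '*')
        resp.set_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
        resp.set_header('Access-Control-Allow-Headers', 'Content-Type')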
Example #5
    def __init__(self,
                 infile,
                 refgenome,
                 pattern,
                 output_name_pattern,
                 window_size=200):
        self._infile = pathlib.Path(infile)
        if not self._infile.exists():
            raise Exception('Can not find input file.')

        self._refgenome = pathlib.Path(refgenome)
        if not self._refgenome.exists():
            raise Exception('Can not find refgenome file')

        self._pattern = pattern
        if '/' in self._pattern:
            self._length_of_pattern = len(self._pattern) - 2
        else:
            self._length_of_pattern = len(self._pattern)

        self._window_size = window_size
        if not self._window_size % 2 == 0:
            raise Exception('Please specify an even number for window_size')

        self._output_name_pattern = output_name_pattern

        self._dict_apalfc = None
        self._opened_ref = None

        self._lgr = logger.set_logger(self._infile.name[:-3] + 'log')
Example #6
def main():
    parser = ArgumentParser()

    parser.add_argument("--lang", required=True, help="language")
    parser.add_argument(
        "--labels",
        required=True,
        help="path to file with Wikidata labels and aliases for language LANG")
    parser.add_argument(
        "--sitelinks",
        required=True,
        help="path to file with Wikidata sitelinks for language LANG")
    parser.add_argument(
        "--wd-graph",
        required=True,
        help="path to Wikidata graph (represented with adjacency lists)")
    parser.add_argument("--wp-sections",
                        required=True,
                        help="path to file with Wikipedia sections")
    parser.add_argument("--output", required=True, help="path to output file")

    args = parser.parse_args()

    log = set_logger("logs/match_{}.{}".format(args.lang, int(time.time())))

    do_process(args, log)
Example #7
    def __init__(self, infile, refgenome, pattern, output_name_pattern, window_size=200):
        self._infile = pathlib.Path(infile)
        if not self._infile.exists():
            raise Exception('Can not find input file.')

        self._refgenome = pathlib.Path(refgenome)
        if not self._refgenome.exists():
            raise Exception('Can not find refgenome file')

        self._pattern = pattern
        if '/' in self._pattern:
            self._length_of_pattern = len(self._pattern) - 2
        else:
            self._length_of_pattern = len(self._pattern)

        self._window_size = window_size
        if not self._window_size % 2 == 0:
            raise Exception('Please specify an even number for window_size')

        self._output_name_pattern = output_name_pattern

        self._dict_apalfc = None
        self._opened_ref = None

        self._lgr = logger.set_logger(self._infile.name[:-3]+'log')
Example #8
 def __init__(self, reference, infilename, sample_name, window_size):
     self._reference = reference
     self._infilename = pathlib.Path(infilename)
     self._sample_name = sample_name
     self._window_size = window_size
     self._cached_dict = None
     # the name of the input is supposed to be XXXXXX.bed
     self._lgr = logger.set_logger(self._infilename.name[:-4] + '.log')
Example #9
 def __init__(self, reference, infilename, sample_name, window_size):
     self._reference = reference
     self._infilename = pathlib.Path(infilename)
     self._sample_name = sample_name
     self._window_size = window_size
     self._cached_dict = None
     # the name of the input is supposed to be XXXXXX.bed
     self._lgr = logger.set_logger(self._infilename.name[:-4] + '.log')
Example #10
 def __init__(self, rcTable, window_size, hasHeader, delrc):
     myclasses.RCTableCacher.__init__(self, rcTable, hasHeader)
     self._window_size = window_size
     self._sorted_dict = None
     self._nodup_dict = None
     self._merged_dict = None
     self._delrc = delrc
     self._lgr = logger.set_logger(self._rcTable.name[:-4] + '.log')
Example #11
File: main.py Project: chenz97/hats
def main():
    # os.environ['CUDA_VISIBLE_DEVICES'] = '3'
    config = get_args()
    logger = set_logger(config)
    dataset = StockDataset(config)
    config.num_relations = dataset.num_relations
    config.num_companies = dataset.num_companies

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True
    model_name = config.model_type
    exp_name = '%s_%s_%s_%s_%s_%s_%s_%s'%(config.data_type, model_name,
                                        str(config.test_phase), str(config.test_size),
                                        str(config.train_proportion), str(config.lr),
                                        str(config.dropout), str(config.lookback))
    if not (os.path.exists(os.path.join(config.save_dir, exp_name))):
        os.makedirs(os.path.join(config.save_dir, exp_name))

    sess = tf.Session(config=run_config)
    model = init_prediction_model(config)
    init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    sess.run(init)

    def model_summary(logger):
        model_vars = tf.trainable_variables()
        slim.model_analyzer.analyze_vars(model_vars, print_info=True)
    model_summary(logger)
   
    # Training
    evaluator = Evaluator(config, logger)
    trainer = Trainer(sess, model, dataset, config, logger, evaluator)
    trainer.train()

    # Testing
    loader = tf.train.Saver(max_to_keep=None)
    loader.restore(sess, tf.train.latest_checkpoint(os.path.join(config.save_dir, exp_name)))
    print("saved at {}/{}".format(config.save_dir, exp_name))
    print("load best evaluation model")

    test_loss, report_all, report_topk = evaluator.evaluate(sess, model, dataset, 'test', trainer.best_f1['neighbors'])
    te_pred_rate, te_acc, te_cpt_acc, te_mac_f1, te_mic_f1, te_exp_rt, te_sharpe = report_all
    logstr = 'EPOCH {} TEST ALL \nloss : {:2.4f} accuracy : {:2.4f} hit ratio : {:2.4f} pred_rate : {} macro f1 : {:2.4f} micro f1 : {:2.4f} expected return : {:2.4f} sharpe : {:2.4f}'\
            .format(trainer.best_f1['epoch'],test_loss,te_acc,te_cpt_acc,te_pred_rate,te_mac_f1,te_mic_f1,te_exp_rt, te_sharpe)
    logger.info(logstr)

    te_pred_rate, te_acc, te_cpt_acc, te_mac_f1, te_mic_f1, te_exp_rt, te_sharpe = report_topk
    logstr = 'EPOCH {} TEST TopK \nloss : {:2.4f} accuracy : {:2.4f} hit ratio : {:2.4f} pred_rate : {} macro f1 : {:2.4f} micro f1 : {:2.4f} expected return : {:2.4f} sharpe : {:2.4f}'\
            .format(trainer.best_f1['epoch'],test_loss,te_acc,te_cpt_acc,te_pred_rate,te_mac_f1,te_mic_f1,te_exp_rt, te_sharpe)
    logger.info(logstr)

    # Print log
    with open('%s_log.log'%model_name, 'a') as out_:
        out_.write("%d phase\n"%(config.test_phase))
        out_.write("%f\t%f\t%f\t%f\t%f\t%f\t%s\t%f\t%f\t%f\t%f\t%f\t%f\t%s\t%d\n"%(
            report_all[1], report_all[2], report_all[3], report_all[4], report_all[5], report_all[6], str(report_all[0]),
            report_topk[1], report_topk[2], report_topk[3], report_topk[4], report_topk[5], report_topk[6], str(report_topk[0]),
            trainer.best_f1['epoch']))
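
The testing block above restores whatever checkpoint tf.train.latest_checkpoint finds under save_dir/exp_name, so it assumes the trainer wrote checkpoints there. In TF1 style the saving side usually looks like this (the epoch variable and checkpoint prefix are assumptions, not code from this project):

# Hypothetical saving side inside the trainer, matching the latest_checkpoint() lookup above.
saver = tf.train.Saver(max_to_keep=None)
ckpt_prefix = os.path.join(config.save_dir, exp_name, 'model')
saver.save(sess, ckpt_prefix, global_step=epoch)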
Example #12
File: controller.py Project: pborky/pywef
    def __init__(self, controllers, **kw):

        self._debug = kw.get('debug', False)
        
        self._init_exc_info = None
        self._worker = None
        # TODO: following is shit...
        #try:
        if 'loggers' in kw:
            for name, data in kw.get('loggers').items():                   
                file = data.get('file')
                fname = file.get('name')
                size = file.get('size')
                count = file.get('count')
                set_logger(name, fname, max_bytes = size, backup_count = count)
        
        if 'exc_wrapper' in kw:
            exc_wrapper = kw.get('exc_wrapper')
            log_setup = ExcInfoWrapper._logging
            log_setup['init'] = exc_wrapper.get('init', log_setup['init'])
            log_setup['call'] = exc_wrapper.get('call', log_setup['call'])


        if 'monitor' in kw:
            monitor = kw.get('monitor')
            force_restart = monitor.get('force_restart', True)
            self._monitor = Monitor(force_restart=force_restart)
            for i in monitor.get('track_files', []):
                self._monitor.track(i)
            self._monitor.start()
        
        
        #except:
        #    self._init_exc_info = ExcInfoWrapper()

        if FrontControllerWorker is None:
            self._init_exc_info = init_exc_info
        else:
            try:
                self._worker = FrontControllerWorker(**controllers)
            except:
                self._init_exc_info = ExcInfoWrapper()
Example #13
File: base.py Project: gizemayydin/newdog
 def __init__(self, model_dir: str, lr: float = 10e-3, batch_size: int = 4,
              max_seq_len: int = 128, verbose: bool = False, filter_results: bool = False,  **_):
     """Model dir will be a full path if the binary is present, and will
     be just the name of the "model_dir" if it is not."""
     super().__init__()
     self.filter_results = filter_results
     self.model_dir = model_dir
     self.lr = lr
     self.max_seq_len = max_seq_len
     self.batch_size = batch_size
     self.logger = set_logger(model_dir, verbose=verbose)
Example #14
    def __init__(self,
                 make,
                 env_name,
                 replay_buffer_size=50000,
                 sample_size=2048,
                 seed=0,
                 is_primary=False,
                 path=None,
                 **kwargs):
        super(Worker, self).__init__()
        self.make = make
        self.env_name = env_name

        self.replay_buffer_size = replay_buffer_size
        self.sample_size = sample_size
        self.seed = seed
        self.kwargs = kwargs

        self.is_primary = is_primary
        self.daemon = True
        self.pipe, self.worker_pipe = multiprocessing.Pipe()

        from logger import set_logger
        if path is None and self.is_primary:
            # save the model if it's the primary
            import datetime
            date = str(datetime.datetime.now()).split('.')[0]
            path = f'/tmp/AWR/{env_name}_{date}'

        if not os.path.exists(path):
            os.makedirs(path)

        self.path = path

        if path is not None and path != 'tmp':
            set_logger(os.path.join(path, 'log.txt'))
            import logging
            self.print = logging.info
        else:
            self.print = print
        self.start()
Example #15
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument("--input", required=True)
    parser.add_argument("--output", required=True)

    args = parser.parse_args()

    log = set_logger("logs/filter_res.{}".format(int(time.time())))

    do_filter(args, log)
    log.info("Filtering: finish")
Example #16
 def __init__(self, ls_inputs, undefined, outputname):
     '''
     This app class takes a list of single-sample RC files and merges them into one.
     The default settings merge the list of files derived from the defined masterlist.
     If you want to merge "Unidentified" UID_files, specify --undefined when calling.
     '''
     self._ls_inputs = utils.list_inputs_from_file(ls_inputs)
     # self._ls_inputs is a list of Path() objects that have already passed the existence check
     self._undefined = undefined
     # defaults to True; it becomes False when --undefined is given on the command line
     self._outputname = outputname
     self._lgr = logger.set_logger('Merged_' + self._outputname[:-4] + '.log')
Example #17
 def __init__(self, reference, infilename, window_size, deny_number):
     # infilename may be a bare name or a path; wrapping it in a Path() object is more convenient
     self.infilename = pathlib.Path(infilename)
     if not self.infilename.exists():
         raise Exception('The input SAM does not exist!')
     self.reference = pathlib.Path(reference)
     if not self.reference.exists():
         raise Exception('The input reference does not exist!')
     self.window_size = window_size
     self.deny_number = deny_number
     # the name of the infile should be XXXXXX.sam
     self._lgr = logger.set_logger(self.infilename.name[:-4] + '.log')
Example #18
def main():
    # os.environ['CUDA_VISIBLE_DEVICES'] = '3'
    config = get_args()
    logger = set_logger(config)
    dataset = StockDataset(config)
    config.num_relations = dataset.num_relations
    config.num_companies = dataset.num_companies
    run_config = tf.ConfigProto(log_device_placement=False)
    run_config.gpu_options.allow_growth = True
    exp_name = '%s_%s_%s_%s_%s_%s_%s_%s' % (
        config.data_type, config.model_type, str(config.test_phase),
        str(config.test_size), str(config.train_proportion), str(
            config.lr), str(config.dropout), str(config.lookback))
    # save train file
    if not (os.path.exists(os.path.join(config.save_dir, exp_name))):
        os.makedirs(os.path.join(config.save_dir, exp_name))

    sess = tf.Session(config=run_config)
    model = init_prediction_model(config)
    init = tf.group(tf.global_variables_initializer(),
                    tf.local_variables_initializer())
    sess.run(init)

    def model_summary():
        model_vars = tf.trainable_variables()
        slim.model_analyzer.analyze_vars(
            model_vars,
            print_info=True)  # print the name and shapes of the variables

    model_summary()

    if 'graph' in config.model_type:
        evaluator = GEvaluator(config, logger)
        # trainer = GTrainer(sess, model, dataset, config, logger, evaluator)
    else:
        evaluator = Evaluator(config, logger)
        # trainer = Trainer(sess, model, dataset, config, logger, evaluator)


    # trainer.train()

    # Testing
    loader = tf.train.Saver(max_to_keep=None)
    loader.restore(
        sess,
        tf.train.latest_checkpoint(os.path.join(config.save_dir, exp_name)))
    print("saved at {}/{}".format(config.save_dir, exp_name))
    print("load best evaluation model")
    test_loss, report = evaluator.evaluate(sess, model, dataset, 'test', True)
    te_pred_rate, te_acc, te_cpt_acc, te_mac_f1, te_mic_f1, te_exp_rt, te_sharpe = report
    logstr = 'TEST ALL \nloss : {:2.4f} accuracy : {:2.4f} hit ratio : {:2.4f} pred_rate : {} macro f1 : {:2.4f} micro f1 : {:2.4f} expected return : {:2.4f} sharpe : {:2.4f}'\
            .format(test_loss,te_acc,te_cpt_acc,te_pred_rate,te_mac_f1,te_mic_f1,te_exp_rt, te_sharpe)
    logger.info(logstr)
Example #19
 def __init__(self, reference, infilename, window_size, deny_number):
     # infilename may be a bare name or a path; wrapping it in a Path() object is more convenient
     self.infilename = pathlib.Path(infilename)
     if not self.infilename.exists():
         raise Exception('The input SAM does not exist!')
     self.reference = pathlib.Path(reference)
     if not self.reference.exists():
         raise Exception('The input reference does not exist!')
     self.window_size = window_size
     self.deny_number = deny_number
     # the name of the infile should be XXXXXX.sam
     self._lgr = logger.set_logger(self.infilename.name[:-4] + '.log')
Example #20
File: controller.py Project: pborky/pywef
    def __init__(self, controllers, **kw):

        self._debug = kw.get('debug', False)

        self._init_exc_info = None
        self._worker = None
        # TODO: following is shit...
        #try:
        if 'loggers' in kw:
            for name, data in kw.get('loggers').items():
                file = data.get('file')
                fname = file.get('name')
                size = file.get('size')
                count = file.get('count')
                set_logger(name, fname, max_bytes=size, backup_count=count)

        if 'exc_wrapper' in kw:
            exc_wrapper = kw.get('exc_wrapper')
            log_setup = ExcInfoWrapper._logging
            log_setup['init'] = exc_wrapper.get('init', log_setup['init'])
            log_setup['call'] = exc_wrapper.get('call', log_setup['call'])

        if 'monitor' in kw:
            monitor = kw.get('monitor')
            force_restart = monitor.get('force_restart', True)
            self._monitor = Monitor(force_restart=force_restart)
            for i in monitor.get('track_files', []):
                self._monitor.track(i)
            self._monitor.start()

        #except:
        #    self._init_exc_info = ExcInfoWrapper()

        if FrontControllerWorker is None:
            self._init_exc_info = init_exc_info
        else:
            try:
                self._worker = FrontControllerWorker(**controllers)
            except:
                self._init_exc_info = ExcInfoWrapper()
Example #21
 def __init__(self, ls_inputs, undefined, outputname):
     '''
     This app class takes a list of single-sample RC files and merges them into one.
     The default settings merge the list of files derived from the defined masterlist.
     If you want to merge "Unidentified" UID_files, specify --undefined when calling.
     '''
     self._ls_inputs = utils.list_inputs_from_file(ls_inputs)
     # self._ls_inputs is a list of Path() objects that have already passed the existence check
     self._undefined = undefined
     # defaults to True; it becomes False when --undefined is given on the command line
     self._outputname = outputname
     self._lgr = logger.set_logger('Merged_' + self._outputname[:-4] +
                                   '.log')
Example #22
def main():
    config = get_args()
    logger = set_logger(config)
    dataset = StockDataset(config)
    config.num_relations = dataset.num_relations
    config.num_companies = dataset.num_companies
    run_config = tf.ConfigProto(log_device_placement=False)
    run_config.gpu_options.allow_growth = True
    exp_name = '%s_%s_%s_%s_%s_%s_%s_%s'%(config.data_type, config.model_type,
                                        str(config.test_phase), str(config.test_size),
                                        str(config.train_proportion), str(config.lr),
                                        str(config.dropout), str(config.lookback))
    # save train file
    if not (os.path.exists(os.path.join(config.save_dir, exp_name))):
        os.makedirs(os.path.join(config.save_dir, exp_name))

    sess = tf.Session(config=run_config)
    model = init_prediction_model(config)
    init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    sess.run(init)

    def model_summary():
        model_vars = tf.trainable_variables()
        slim.model_analyzer.analyze_vars(model_vars, print_info=True) # print the name and shapes of the variables
    model_summary()

    if config.mode == 'train':
        if 'graph' in config.model_type:
            evaluator = GEvaluator(config, logger)
            trainer = GTrainer(sess, model, dataset, config, logger, evaluator)
        else:
            evaluator = Evaluator(config, logger)
            trainer = Trainer(sess, model, dataset, config, logger, evaluator)
    trainer.train()

    #Testing
    loader = tf.train.Saver(max_to_keep=None)
    loader.restore(sess, tf.train.latest_checkpoint(os.path.join(config.save_dir, exp_name)))
    print("load best evaluation model")
    test_loss, report = evaluator.evaluate(sess, model, dataset, 'test')
    te_pred_rate, te_acc, te_cpt_acc, te_mac_f1, te_mic_f1, te_exp_rt = report
    logstr = 'EPOCH {} TEST ALL \nloss : {:2.4f} accuracy : {:2.4f} hit ratio : {:2.4f} pred_rate : {} macro f1 : {:2.4f} micro f1 : {:2.4f} expected return : {:2.4f}'\
            .format(trainer.best_f1['epoch'],test_loss,te_acc,te_cpt_acc,te_pred_rate,te_mac_f1,te_mic_f1,te_exp_rt)
    logger.info(logstr)

    with open('%s_log.log' % config.GNN_model + '_' + config.model_type + '_' +
              config.data_type + '_' + config.price_model + '_' +
              str(config.test_phase), 'a') as out_:
        out_.write("%d phase\n"%(config.test_phase))
        out_.write("%f\t%f\t%f\t%f\t%f\t%s\t%d\n"%(
            report[1], report[2], report[3], report[4], report[5], str(report[0]),
            trainer.best_f1['epoch']))
Example #23
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump",
                        required=True,
                        help="path to Wikipedia dump file")
    parser.add_argument("--output", required=True, help="path to output file")

    args = parser.parse_args()

    log = set_logger("logs/process_wikipedia_{}.{}".format(
        args.lang, int(time.time())))

    process_wikipedia(args, log)
Example #24
def main():
    args = parser.parse_args()
    log = logger.set_logger(LOG_FILENAME,
                            debug_mode=args.log_level,
                            print_to_screen=args.verbose)
    log.info(f'booting bot v{VERSION} on {platform()}')
    log.info('\n\t'.join([
        'arguments:', f'log-level: {args.log_level}',
        f'verbose: {args.verbose}', f'mode: {args.mode}',
        f'configuration filename: {args.config_file}'
    ]))
    bot = bot_server(config_file=args.config_file)
    if not bot.is_valid():
        log.error('could not create bot server, aborting')
    else:
        log.info('server created successfully')
        bot.run(args.mode)
    log.info('shutting down.')
Example #25
parser.add_argument('--lam',
                    default=1e-4,
                    type=float,
                    help='L2 regularization')
parser.add_argument('--seed', default=0, type=int, help='seed for hashing')
parser.add_argument('--subsample_max',
                    default=60,
                    type=int,
                    help='max subsampled training labels')
parser.add_argument('--K',
                    default=50,
                    type=int,
                    help='top K reviewers per paper considered for assignment')

args = parser.parse_args()
logger = set_logger("assignment")
logger.info(args)


def precision_evaluate(w, X, y):
    preds = X.dot(w)
    preds = np.reshape(preds, (num_reviewer, int(X.shape[0] / num_reviewer)))
    y = np.reshape(y, (num_reviewer, int(X.shape[0] / num_reviewer)))
    indices = np.argsort(preds, 1)
    prec_at_k = np.zeros((preds.shape[0], 10))
    for i in range(preds.shape[0]):
        out = y[i, indices[i, -1:-11:-1]] == 1
        prec_at_k[i] = np.cumsum(out).astype(float) / np.arange(1, 11)
    logger.info("top10papers: " + repr(prec_at_k.mean(0)))
    prec_top_paper = prec_at_k.mean(0)
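
precision_evaluate reshapes the flat prediction vector into a (num_reviewer, papers-per-reviewer) matrix and logs the mean precision at 1 through 10; note that it computes prec_top_paper but never returns it. A small smoke test of the shape contract (all sizes and data below are invented) could be:

import numpy as np

# Hypothetical smoke test: 5 reviewers x 20 papers, 8 hashed features.
num_reviewer = 5
X = np.random.rand(num_reviewer * 20, 8)
w = np.random.rand(8)
y = (np.random.rand(num_reviewer * 20) > 0.9).astype(float)
precision_evaluate(w, X, y)  # logs "top10papers: ..." via the module-level logger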
Example #26
File: dataset.py Project: chenz97/hats
def main():
    config = get_args()
    logger = set_logger(config)
    dataset = StockDataset(config)
Example #27
                    help='print info to screen')

config.args = parser.parse_args()

config.args.torch_device = ('cuda' if torch.cuda.is_available()
                            and not config.args.ignore_cuda else 'cpu')
config.args.use_cuda = config.args.torch_device == 'cuda'

# create run dir and setup
if not config.args.ignore_timestamp:
    config.args.runname += datetime.datetime.now().strftime('_%Y%m%d_%H%M%S')

run_dir = os.path.join(config.args.save_dir, config.args.runname)

os.makedirs(run_dir, exist_ok=True)
logger.set_logger(
    os.path.join(run_dir, 'log_' + str(config.args.runname) + '.log'))
configfile = os.path.join(run_dir,
                          'conf_' + str(config.args.runname) + '.config')

config.log.info(f'==> Created subdir for run at: {run_dir}')

# save configuration parameters
with open(configfile, 'w') as f:
    for arg in vars(config.args):
        f.write('{}: {}\n'.format(arg, getattr(config.args, arg)))

config.log.info('==> Loading dataset...')

config.cast_graph = ds.get_cast_graph()
train_loader, eval_loader, test_loader = ds.get_loaders()
Example #28
 def __init__(self, rcTable, hasHeader):
     myclasses.RCTableCacher.__init__(self, rcTable, hasHeader)
     self._rcMatrix = None
     self._sumList_perGene = None
     self._sumList_perApA = None
     self._lgr = logger.set_logger(self._rcTable.name[:-4] + '.log')
Example #29
    @return: web app
    """
    ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    ssl_ctx.load_cert_chain(os.path.join("certificates", "server.crt"),
                            os.path.join("certificates", "server.key"))

    return tornado.web.Application([(r"/alke", WebSocketHandler)],
                                   debug=True,
                                   websocket_ping_interval=0,
                                   websocket_max_message_size=1000000000,
                                   ssl_options=ssl_ctx)


if __name__ == "__main__":
    # set up session logger
    set_logger(level='debug')
    # set up web application
    app = make_app()

    app.listen(5000,
               ssl_options={
                   "certfile": os.path.join("certificates", "server.crt"),
                   "keyfile": os.path.join("certificates", "server.key"),
               })

    # instantiate tf model before running the inference to prevent slow loading times
    instantiate_model()

    # inference first image -> otherwise first frame processed slowly
    img_first = cv2.imread("first_frame_instance_model.jpg")
    process_image(img_first)
Example #30
import argparse
from logger import set_logger

parser = argparse.ArgumentParser()
parser.add_argument('--output_dir', default='./results/', type=str, help='output dir')
parser.add_argument('--lam', default=1e-4, type=float, help='L2 regularization')
parser.add_argument('--num_seeds_X', default=1, type=int, help='seed for hashing')
parser.add_argument('--num_seeds_results', default=1, type=int, help='seed for attack sampling')
parser.add_argument('--subsample_max', default=60, type=int, help='max subsampled training labels')
parser.add_argument('--K', default=50, type=int, help='top K reviewers per paper considered for assignment')
parser.add_argument('--num_sim', default=10, type=int, help='the number of samples in each bins')
parser.add_argument('--L', default=0, type=int, help='the colluding size for attack evaluation')
parser.add_argument('--cheat_mode', default="white_box", type=str, help="white_box / black_box /simple_black_box")
args = parser.parse_args()

logger = set_logger("attack results evaluation")
logger.info(args)

num_paper, num_reviewer, _, _, hashed_ratio = get_global_variable()
num_sim = args.num_sim
X_seeds = range(args.num_seeds_X)
K = args.K
lam = args.lam
subsample_max = args.subsample_max
cheat_mode = args.cheat_mode
L = args.L

X_csr_s = []
H_inv_s = []
y, y_train = load_y(hashed_ratio, logger, subsample_max=subsample_max)
preds_s = []
Example #31
File: bot.py Project: sapunold/ASFBot
import argparse
import re
import logger
import requests.exceptions
from ASFConnector import ASFConnector

_REGEX_CDKEY = re.compile(r'\w{5}-\w{5}-\w{5}')
_REGEX_COMMAND_BOT_ARGS = r'^[/!]\w+\s*(?P<bot>\w+)?\s+(?P<arg>.*)'
_REGEX_COMMAND_RAW = r'^[/!](?P<input>(?P<command>\w+).*)'
_REGEX_COMMAND = r'^[/!]\w+\s*(?P<bot>\w+)?'
_ENV_TELEGRAM_BOT_TOKEN = "TELEGRAM_BOT_TOKEN"
_ENV_TELEGRAM_USER_ALIAS = "TELEGRAM_USER_ALIAS"
_ENV_ASF_IPC_HOST = "ASF_IPC_HOST"
_ENV_ASF_IPC_PORT = "ASF_IPC_PORT"
_ENV_ASF_IPC_PASSWORD = "******"

LOG = logger.set_logger('ASFBot')

parser = argparse.ArgumentParser()

parser.add_argument("-v",
                    "--verbosity",
                    help="Defines log verbosity",
                    choices=['CRITICAL', 'ERROR', 'WARN', 'INFO', 'DEBUG'],
                    default='INFO')
parser.add_argument("--host",
                    help="ASF IPC host. Default: 127.0.0.1",
                    default='127.0.0.1')
parser.add_argument("--port",
                    help="ASF IPC port. Default: 1242",
                    default='1242')
parser.add_argument("--password", help="ASF IPC password.", default=None)
Example #32
                    help='max subsampled training labels')
args = parser.parse_args()

num_paper, num_reviewer, input_dir, max_pc_quota, hashed_ratio = get_global_variable()
seeds = range(args.num_seeds)
L_attack = args.L_attack
K = args.K
lam = args.lam
subsample_max = args.subsample_max
cheat_mode = args.cheat_mode
c_train = 2.0 / (num_reviewer * num_paper)

logger = set_logger(
    "detect_tpr",
    "{}/detect_tpr/log_detect_tpr_collusion_{}_top_{}_{}_lam_{}_subsample_max_{}_seed_{}.txt"
    .format(args.output_dir, L_attack, K, cheat_mode, lam, subsample_max,
            args.seed))
logger.info(args)

#1. init data
X_csr_s = []
H_inv_s = []
y, y_train = load_y(hashed_ratio, logger, subsample_max=subsample_max)
preds_s = []
for seed in seeds:
    X_csr, H_inv = load_X_and_H_inv(hashed_ratio, seed, logger, lam)
    preds = load_preds(X_csr,
                       y_train,
                       H_inv,
                       hashed_ratio,
Example #33
 def setUpClass(cls):
     LOG_NAME = 'QuakeSounds_Bot.test'
     logger.set_logger(LOG_NAME)
     cls.db = Database(provider='sqlite')
Example #34
File: main.py Project: Torpus/ouroboros
            current_image = api_client.inspect_image(
                running_container['Config']['Image'])
            try:
                latest_image = image.pull_latest(current_image)
            except docker.errors.APIError as e:
                logging.error(e)
                continue
            # Update the container if it is not running the latest image
            if not image.is_up_to_date(current_image['Id'],
                                       latest_image['Id']):
                logging.info(('{} will be updated').format(
                    container.get_name(running_container)))
                # new container object to create new container from
                new_config = container.NewContainerProperties(
                    running_container, latest_image['RepoTags'][0])
                container.stop(running_container)
                container.remove(running_container)
                new_container = container.create_new_container(
                    new_config.__dict__)
                container.start(new_container)
                image.remove(current_image)
        logging.info('All containers up to date')


if __name__ == "__main__":
    set_logger('debug')
    schedule.every(defaults.INTERVAL).seconds.do(main)
    while True:
        schedule.run_pending()
        time.sleep(defaults.INTERVAL - 5)
Example #35
File: start.py Project: Maxsparrow/SAL
def main():
    lg.set_logger('imdbparser')

    imdb = IMDBGetter(IMDB_FTP_URL)

    imdb.get_file(QUOTES_FILENAME)
Example #36
 def __init__(self, rcTable, hasHeader):
     myclasses.RCTableCacher.__init__(self, rcTable, hasHeader)
     self._rcMatrix = None
     self._sumList_perGene = None
     self._sumList_perApA = None
     self._lgr = logger.set_logger(self._rcTable.name[:-4] + '.log')
Example #37
        """
        self.gui.setButton(VideoClient.CONNECT_BUTTON,
                           f"In a call with {nickname}")

    def display_connect(self):
        """
        This function will be called when a call ends. Changes the connect and hold buttons names to the default ones.
        :return:
        """
        self.gui.setButton(VideoClient.CONNECT_BUTTON,
                           VideoClient.CONNECT_BUTTON)
        self.gui.setButton(VideoClient.HOLD_BUTTON, VideoClient.HOLD_BUTTON)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Samtale')

    parser.add_argument('-log_level',
                        action='store',
                        nargs='?',
                        default='info',
                        choices=['debug', 'info', 'warning', 'error'],
                        required=False,
                        help='Indicate logging level')

    args = parser.parse_args()

    set_logger(args)
    VideoClient().start()
    _exit(0)