Example 1
def main():
    args = parse_args()
    set_globals(args)
    init_logging(args.log)
    print(VCF.get_header(args.sample))
    CrypticVariant.write_contig_header(args.contig_info_output)
    annotate_contigs(args)
Example 2
def main():
    init_logging()
    args = get_script_arguments()
    resize_images(input_base_dir=args.input_dir,
                  output_base_dir=args.output_dir,
                  ext=args.extension,
                  resize_dims=[int(v) for v in args.resize_dims.split(',')])
Example 3
def main():
    # Setup: logging, notesdir
    ui = user_input()

    init_logging(ui.logfile, ui.debug)
    create_notes_dir(ui.notesdir, ui.strict)

    # user interface wiring
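    # no arguments (or only --debug): default to listing pending tasks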
    if len(sys.argv) == 1 or (len(sys.argv) == 2 and sys.argv[1] == '--debug'):
        render_task_list(query='pending',
                         dir=ui.notesdir,
                         ext=ui.ext,
                         fmt=ui.format)
    elif ui.list:
        render_task_list(query=ui.filter,
                         dir=ui.notesdir,
                         ext=ui.ext,
                         fmt=ui.format)
    elif ui.view:
        view_task(task_id=ui.task[0],
                  fmt=ui.format,
                  dir=ui.notesdir,
                  ext=ui.ext)
    else:
        edit_note(task=get_or_make_task(ui.task),
                  dir=ui.notesdir,
                  editor=ui.editor,
                  ext=ui.ext,
                  async=ui.async)  # note: 'async' became a reserved keyword in Python 3.7; this code targets an older Python
Example 4
def main():
    """ The main function. It creates VmaaS application, servers, run everything."""

    vmaas_app = Application()

    server = tornado.httpserver.HTTPServer(vmaas_app)
    server.bind(PUBLIC_API_PORT)
    num_servers = int(os.getenv("MAX_VMAAS_SERVERS", MAX_SERVERS))
    server.start(num_servers)  # start forking here
    init_logging(num_servers)
    LOGGER.info("Starting (version %s).", VMAAS_VERSION)

    # The rest must be done only after forking
    BaseHandler.db_cache = Cache()
    BaseHandler.updates_api = UpdatesAPI(BaseHandler.db_cache)
    BaseHandler.repo_api = RepoAPI(BaseHandler.db_cache)
    BaseHandler.cve_api = CveAPI(BaseHandler.db_cache)
    BaseHandler.errata_api = ErrataAPI(BaseHandler.db_cache)
    BaseHandler.dbchange_api = DBChange(BaseHandler.db_cache)

    vmaas_app.websocket_reconnect()
    vmaas_app.reconnect_callback = PeriodicCallback(
        vmaas_app.websocket_reconnect, WEBSOCKET_RECONNECT_INTERVAL * 1000)
    vmaas_app.reconnect_callback.start()

    IOLoop.instance().start()
Example 5
def main():
    args = parse_args(sys.argv[1:])
    init_logging(args.log)

    try:
        contigs = pd.read_csv(args.contig_info, sep='\t',
                              low_memory=False).fillna('')
        de_results = pd.read_csv(args.de_results, sep='\t', low_memory=False)
        vafs = pd.read_csv(args.vaf_estimates, sep='\t', low_memory=False)
        vafs = vafs[['contig_id', 'TPM', 'mean_WT_TPM',
                     'VAF']].drop_duplicates()
        gene_filter = (pd.read_csv(args.gene_filter, header=None, low_memory=False)
                       if args.gene_filter != '' else pd.DataFrame())
    except IOError as exception:
        exit_with_error(str(exception), EXIT_FILE_IO_ERROR)

    # count the number of variants per contig (including uninteresting variants)
    vars_per_contig = contigs.groupby('contig_id', as_index=False)
    vars_per_contig = vars_per_contig.agg(
        {'variant_id': lambda x: len(np.unique(x))})
    vars_per_contig = vars_per_contig.rename({'variant_id': 'vars_in_contig'},
                                             axis=1)
    contigs = contigs.merge(vars_per_contig)

    # consider only variants of interest
    contigs = contigs[contigs.variant_of_interest]
    contigs['sample'] = args.sample

    if args.var_filter:
        contigs = contigs[contigs.variant_type.apply(
            lambda v: v in args.var_filter).values]

    if len(gene_filter) > 0:
        contigs = filter_by_gene(contigs, gene_filter)

    if len(contigs) == 0:
        logging.warning('No variants present after filtering. Exiting.')
        contigs.to_csv(sys.stdout, index=False, sep='\t', na_rep='NA')
        sys.exit()

    logging.info('Adding DE and VAF info...')
    contigs = add_de_info(contigs, de_results)
    contigs = pd.merge(contigs, vafs, on='contig_id', how='left')

    short_gnames = contigs.overlapping_genes.map(str).apply(
        get_short_gene_name)
    contig_ids, samples = contigs.contig_id, contigs['sample']
    con_names = [
        '|'.join([s, cid, sg])
        for cid, s, sg in zip(contig_ids, samples, short_gnames)
    ]
    # TODO: fix this field (it is not really unique)
    contigs['unique_contig_ID'] = con_names

    contigs = get_variant_seq(contigs, args.contig_fasta)

    logging.info('Outputting to CSV')
    contigs = reformat_fields(contigs)
    contigs.to_csv(sys.stdout, index=False, sep='\t', na_rep='NA')
Example 6
def __init__(self):
    # parse_args is called by argcomplete; must be as fast as possible
    self.args = utils.parse_args()
    super(FreeIPAManager, self).__init__()
    utils.init_logging(self.args.loglevel)
    self._load_settings()
    # Find out whether users should be pushed from Okta
    self.okta_users = self.settings.get('okta', dict()).get('enabled', False)
Example 7
def main():
    ui = user_input()
    init_logging(ui.logfile, ui.debug)

    tasks = note.list_tasks(None, ui.notesdir, ui.ext)

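    # dispatch on the subcommand given as the first positional argument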
    if ui.cmd[0] == 'archive':
        archive_stale(tasks, ui.notesdir)
    elif ui.cmd[0] == 'alias':
        generate_aliases(tasks, ui.notesdir)
Example 8
def main():
    args = parse_args()
    init_logging(args.log)
    set_globals(args)
    keep_contigs = get_contigs_to_keep(args)
    if len(keep_contigs) > 0:
        write_output(args, keep_contigs)
        write_bam(args, keep_contigs)
    else:
        exit_with_error('ERROR: no variants to output.',
                        constants.EXIT_OUTPUT_ERROR)
Example 9
def _main():
    utils.init_logging(r'logs\main.log')

    out = six.StringIO()
    # Expose all functions that don't begin with an underscore "_" in the current module
    argh.dispatch_commands([
        obj for name, obj in inspect.getmembers(sys.modules[__name__])
        if inspect.isfunction(obj) and obj.__module__ == '__main__'
        and not name.startswith('_')
    ],
                           output_file=out)

    print(out.getvalue())
Example 10
def cli(ctx, prefix, verbose, debug, dry_run, force, opsworks):

    log_level = logging.DEBUG if debug else logging.INFO
    utils.init_logging(prefix, verbose, log_level)
    log.debug("Command: %s %s, options: %s", ctx.info_name, ctx.invoked_subcommand, ctx.params)
    if dry_run:
        log.info("Dry run enabled!")

    if opsworks:
        cluster = OpsworksController(prefix, force=force, dry_run=dry_run)
        ctx.meta["opsworks"] = True
    else:
        cluster = EC2Controller(prefix, force=force, dry_run=dry_run)

    # attach controller to the context for subcommand use
    ctx.obj = cluster
Example 11
def run():
    args = add_args()
    init_logging()

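    # URL routes: index page, websocket chat endpoint, and static/template file handlers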
    handlers = [
        (r'^/$', IndexHandler),
        (r'/ws/(.*)', ChatHandler, dict(data_path=args.data,
                                        log_path=args.log)),
        (r'/static/(.*)', web.StaticFileHandler, {
            'path': os.path.join(base_dir, 'static')
        }),
        (r"/(.*.html)", web.StaticFileHandler, {
            "path": os.path.join(base_dir, 'templates')
        }),
    ]

    app = web.Application(handlers)
    server = app.listen(args.port)
    ioloop.IOLoop.instance().start()
Example 12
def main():
    dataset, version, nbfiles, pos_tags, tfidf, args = parse_args()

    corpus_type = "tfidf" if tfidf else "bow"

    logger = init_logging(name=f'MM_{dataset}_{corpus_type}',
                          basic=False,
                          to_stdout=True,
                          to_file=True)
    logg = logger.info if logger else print
    log_args(logger, args)

    texts, stats, nbfiles = make_texts(dataset, nbfiles, pos_tags, logg=logg)
    gc.collect()

    file_name = f'{dataset}{nbfiles if nbfiles else ""}_{version}'
    directory = join(LDA_PATH, version)
    if not exists(directory):
        makedirs(directory)

    # --- saving texts ---
    file_path = join(directory, f'{file_name}_texts.json')
    logg(f'Saving {file_path}')
    with open(file_path, 'w') as fp:
        json.dump(texts, fp, ensure_ascii=False)

    # --- saving stats ---
    file_path = join(directory, f'{file_name}_stats.json')
    logg(f'Saving {file_path}')
    with open(file_path, 'w') as fp:
        json.dump(stats, fp)

    # generate and save the dataset as bow or tfidf corpus in Matrix Market format,
    # including dictionary, texts (json) and some stats about corpus size (json)
    corpus, dictionary = texts2corpus(texts,
                                      tfidf=tfidf,
                                      filter_below=5,
                                      filter_above=0.5,
                                      logg=logg)

    file_name += f'_{corpus_type}'
    directory = join(directory, corpus_type)
    if not exists(directory):
        makedirs(directory)

    # --- saving corpus ---
    file_path = join(directory, f'{file_name}.mm')
    logg(f'Saving {file_path}')
    MmCorpus.serialize(file_path, corpus)

    # --- saving dictionary ---
    file_path = join(directory, f'{file_name}.dict')
    logg(f'Saving {file_path}')
    dictionary.save(file_path)
Example 13
def main():
    args = parse_args(sys.argv[1:])
    init_logging(args.log)

    try:
        contigs = pd.read_csv(args.contig_info, sep='\t', low_memory=False)
        st_bed = pd.read_csv(args.st_bed,
                             sep='\t',
                             header=None,
                             names=BED_COLS,
                             low_memory=False)
    except IOError as exception:
        exit_with_error(str(exception), EXIT_FILE_IO_ERROR)

    logging.info('Matching contigs to ST alignments...')
    contigs = get_st_alignments(contigs, args.cont_align)
    logging.info('Counting reads crossing variant boundaries...')
    bamf = pysam.AlignmentFile(args.read_align, "rb")
    contigs = get_read_support(contigs, bamf, st_bed)

    logging.info('Outputting to CSV')
    contigs = contigs.sort_values(by='PValue', ascending=True)
    contigs.to_csv(sys.stdout, index=False, sep='\t', na_rep='NA')
Example 14
    def __init__(self):
        Flask.__init__(self, __name__)

        # Read configuration from file:
        # If settings.py is found, use it.
        # If settings.py doesn't exist, use default_settings.py.
        # If the COLLECTOR_SETTINGS env variable is set, also load the file it specifies.
        #
        # http://flask.pocoo.org/docs/config/#configuring-from-files
        try:
            self.config.from_object('settings')
        except:
            self.config.from_object('default_settings')

        try:
            self.config.from_envvar('COLLECTOR_SETTINGS')
        except:
            pass

        self.define_routes()
        self.register_error_handlers()

        init_logging(self)
Example 15
def main():
    args = parse_args()
    init_logging(args.log)

    genome_bed, st_block_bed, st_gene_bed, st_fasta = get_output_files(args.sample, args.outdir)
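    # remove any pre-existing output files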
    for output_file in (genome_bed, st_block_bed, st_gene_bed, st_fasta):
        if os.path.exists(output_file):
            os.remove(output_file)

    try:
        gtf = load_gtf_file(args.gtf_file)
        cvcf = load_vcf_file(args.contig_vcf)
        contigs = pd.read_csv(args.contig_info, sep='\t', low_memory=False).fillna('')
    except IOError as exception:
        exit_with_error(str(exception), EXIT_FILE_IO_ERROR)

    make_supertranscripts(args, contigs, cvcf, gtf)
    write_canonical_genes(args, contigs, gtf)
Example 16
def run():
    utils.init_logging()
    logger.info('Monobox fetcher starting up')

    config.init()
    database.init(config.get('common', 'database_uri'))

    max_age = config.get('fetcher', 'max_age')
    if max_age:
        max_age_delta = utils.str_to_timedelta(max_age)
        if max_age_delta is None:
            logger.error('Cannot convert configuration parameter '
                         'max_age (%s) to timedelta', max_age)
            sys.exit(1)
    else:
        max_age_delta = None

    urls = config.get('fetcher', 'sc_urls').split(' ')
    listeners_min = config.getint('fetcher', 'listeners_min')
    for url in urls:
        merge_sc_stations(url, listeners_min)

    if max_age_delta is not None:
        purge_old_stations(max_age_delta)
Example 17
def main(args):

    logdir = init_logging(args)
    logger = logging.getLogger(__name__)

    args.logdir = logdir

    if args.cpu or not th.cuda.is_available():
        device = th.device('cpu')
    else:
        device = th.device('cuda')
        cudnn.enabled = True
        cudnn.benchmark = True

    if not args.devrun and not args.nosave:
        wandb.init(config=args, dir=logdir, project=args.project)

        if args.name is not None:
            wandb.run.name = args.name
        # else:
        #     wandb.run.name = wandb.run.id

    seed_all(args.seed)

    logger.info('Creating dataloader')
    loader = create_dataloader(args)

    logger.info('Creating model')
    model = create_model(args).to(device)

    logger.info('Creating optimiser')
    opt = create_optimiser(model.parameters(), args)

    logger.info('Creating loss')
    loss = create_loss(args)

    logger.info('Creating trainer')
    trainer = create_trainer(loader, model, opt, loss, device, args)

    epochs = args.epochs
    epoch_length = args.epoch_length

    logger.info('Starting trainer')
    wandb.watch(model, log="all", log_freq=1)
    trainer.run(loader['train'], max_epochs=epochs, epoch_length=epoch_length)
Example 18
def main():
    (dataset, version, corpus_type, metrics, params, nbtopics, topn, cores,
     coh, vec, weight, oop, evaluate, save, plot, args) = parse_args()

    # --- logging ---
    logger = init_logging(name=f'Reranking_{dataset}',
                          basic=False,
                          to_stdout=True,
                          to_file=True)
    logg = logger.info
    log_args(logger, args)
    t0 = time()

    reranker = Reranker(dataset=dataset,
                        version=version,
                        corpus_type=corpus_type,
                        params=params,
                        nbtopics=nbtopics,
                        nb_candidate_terms=topn,
                        nb_top_terms=10,
                        processes=cores,
                        logg=logg)
    if coh:
        reranker.rerank_coherence(metrics)
    if vec:
        reranker.rerank_w2v()
    if weight:
        reranker.weight_score()
    if oop:
        reranker.oop_score()
    if evaluate:
        reranker.evaluate()
    if save:
        reranker.save_results()
    if plot:
        reranker.plot()

    logg(f'final shape {reranker.topic_candidates.shape}')
    assert len(reranker.topic_candidates) == 24975

    t1 = int(time() - t0)
    logg(f">>> done in {t1//3600:02d}:{(t1//60)%60:02d}:{t1%60:02d} <<<")
    return reranker
Example 19
def run():
    """
    Main process that\:
     * Parse command-line arguments,
     * Parse configuration file,
     * Initiates logger,
     * Check GitHub permissions,
     * Check Handle Service connection,
     * Run the issue action.

    """
    # Get command-line arguments
    args = get_args()
    # init logging
    if args.v and args.log is not None:
        init_logging(args.log, level='DEBUG')
    elif args.log is not None:
        init_logging(args.log)
    else:
        init_logging(None)

    # Run command
    # Retrieve command has a slightly different behavior from the rest so it's singled out
    if args.command != 'retrieve':
        issue_file = _get_issue(args.issue)
        dataset_file = _get_datasets(args.dsets)
        print(args.issue, issue_file)
        process_command(args.command, issue_file, dataset_file, args.issue, args.dsets)

    elif args.command == 'retrieve':
        if args.id is not None:
            list_of_ids = args.id
            # In the case the user is requesting more than one issue
            for directory in [args.issues, args.dsets]:
                # Added the '.' test to avoid creating directories that are intended to be files.
                if not os.path.exists(directory) and '.' not in directory:
                    os.makedirs(directory)
                # This tests whether a list of ids is provided with a directory where to dump the retrieved
                # issues and related datasets.
                if len(list_of_ids) > 1 and not os.path.isdir(directory):
                    print('You have provided multiple ids but a single file as destination, aborting.')
                    sys.exit(1)
            # Looping over list of ids provided
            for n in list_of_ids:
                local_issue = LocalIssue(None, None, None, None, args.command)
                local_issue.retrieve(n, args.issues, args.dsets)
        else:
            # TODO provide possibility to flush database contents from a dedicated webservice?
            pass
Example 20
def _init(args):
    utils.init_logging(None, args.debug, args.quiet, args.logfile)
Example 21
            d.add_message([(msg, 1.0)], {slot: cls_name}, Dialog.ACTOR_USER)
            dialogs.append(d)

    print('> Data built.')
    xt = XTrackData2()
    xt.build(dialogs,
             slots=['food'],
             slot_groups={0: ['food']},
             based_on=based_on,
             oov_ins_p=0.0,
             include_system_utterances=False,
             n_nbest_samples=1,
             n_best_order=[0],
             score_mean=0.0,
             dump_text='/dev/null',
             replace_entities=False,
             split_dialogs=False)
    print('> Saving.')
    xt.save(out_file)


if __name__ == '__main__':
    from utils import init_logging
    init_logging('ConcatData')

    parser = argparse.ArgumentParser()
    parser.add_argument('file', nargs='*', action='append')
    args = parser.parse_args()

    concat(**vars(args))
Example 22
def main(**args):
    r"""Performs the main training loop
    """
    gray_mode = args['gray']  # gray mode indicator
    C = 1 if gray_mode else 3  # number of color channels

    # Load dataset
    print('> Loading datasets ...')
    dataset_val = ValDataset(valsetdir=args['valset_dir'],
                             gray_mode=gray_mode)  # for grayscale/color video
    # dataset_val = ValDataset(valsetdir=args['valset_dir'], gray_mode=False) # for color videos only
    loader_train = train_dali_loader(batch_size=args['batch_size'],\
                                    file_root=args['trainset_dir'],\
                                    sequence_length=args['temp_patch_size'],\
                                    crop_size=args['patch_size'],\
                                    epoch_size=args['max_number_patches'],\
                                    random_shuffle=True,\
                                    temp_stride=3,\
                                    gray_mode=gray_mode)

    num_minibatches = int(args['max_number_patches'] // args['batch_size'])
    ctrl_fr_idx = (args['temp_patch_size'] - 1) // 2
    print("\t# of training samples: %d\n" % int(args['max_number_patches']))

    # Init loggers
    writer, logger = init_logging(args)

    # Define GPU devices
    device_ids = [0]
    torch.backends.cudnn.benchmark = True  # CUDNN optimization

    # Create model
    model = FastDVDnet(num_color_channels=C)

    model = model.cuda()

    # Define loss
    criterion = nn.MSELoss(reduction='sum')
    criterion.cuda()

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=args['lr'])

    # [AMP initialization] automated half-precision training
    if args['fp16']:
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          opt_level=args['amp_opt_level'])

    # model = nn.DataParallel(model, device_ids=device_ids).cuda()
    model = nn.DataParallel(model)

    # Resume training or start anew
    start_epoch, training_params = resume_training(args, model, optimizer)

    # Training
    start_time = time.time()
    for epoch in range(start_epoch, args['epochs']):
        # Set learning rate
        current_lr, reset_orthog = lr_scheduler(epoch, args)
        if reset_orthog:
            training_params['no_orthog'] = True

        # set learning rate in optimizer
        for param_group in optimizer.param_groups:
            param_group["lr"] = current_lr
        print('\nlearning rate %f' % current_lr)

        # train

        for i, data in enumerate(loader_train, 0):

            # Pre-training step
            model.train()

            # When optimizer = optim.Optimizer(net.parameters()) we only zero the optim's grads
            optimizer.zero_grad()

            # convert input to [N, num_frames*C, H, W] in [0., 1.] from [N, num_frames, C, H, W] in [0., 255.]
            # extract ground truth (central frame)
            img_train, gt_train = normalize_augment(data[0]['data'],
                                                    ctrl_fr_idx, gray_mode)
            N, _, H, W = img_train.size()

            # std dev of each sequence
            stdn = torch.empty(
                (N, 1, 1, 1)).cuda().uniform_(args['noise_ival'][0],
                                              to=args['noise_ival'][1])
            # draw noise samples from std dev tensor
            noise = torch.zeros_like(img_train)
            noise = torch.normal(mean=noise, std=stdn.expand_as(noise))

            #define noisy input
            imgn_train = img_train + noise

            # Send tensors to GPU
            gt_train = gt_train.cuda(non_blocking=True)
            imgn_train = imgn_train.cuda(non_blocking=True)
            noise = noise.cuda(non_blocking=True)
            noise_map = stdn.expand(
                (N, 1, H, W)).cuda(non_blocking=True)  # one channel per image

            # Evaluate model and optimize it
            out_train = model(imgn_train, noise_map)

            # Compute loss
            loss = criterion(gt_train, out_train) / (N * 2)

            # [AMP scale loss to avoid overflow of float16] automated mixed precision training
            if args['fp16']:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            optimizer.step()

            # Results
            if training_params['step'] % args['save_every'] == 0:
                # Apply regularization by orthogonalizing filters
                if not training_params['no_orthog']:
                    model.apply(svd_orthogonalization)

                # Compute training PSNR
                log_train_psnr(out_train, \
                                gt_train, \
                                loss, \
                                writer, \
                                epoch, \
                                i, \
                                num_minibatches, \
                                training_params)
            # update step counter
            training_params['step'] += 1

        # save model and checkpoint
        training_params['start_epoch'] = epoch + 1
        save_model_checkpoint(model, args, optimizer, training_params, epoch)

        # Call to model.eval() to correctly set the BN layers before inference
        model.eval()

        # Validation and log images
        validate_and_log(
                        model_temp=model, \
                        dataset_val=dataset_val, \
                        valnoisestd=args['val_noiseL'], \
                        temp_psz=args['temp_patch_size'], \
                        writer=writer, \
                        epoch=epoch, \
                        lr=current_lr, \
                        logger=logger, \
                        trainimg=img_train, \
                        gray_mode=gray_mode
                        )

    # Print elapsed time
    elapsed_time = time.time() - start_time
    print('Elapsed time {}'.format(
        time.strftime("%H:%M:%S", time.gmtime(elapsed_time))))

    # Close logger file
    close_logger(logger)
Example 23
def run(username, log_path, telegram_chat_ids):
    init_logging(log_path)
    tweet_monitor = TweetMonitor(username, telegram_chat_ids)
    tweet_monitor.run()
Example 24
from datetime import datetime
from zipfile import ZipFile
from io import BytesIO

import os
import os.path
import stat
import shutil
import sys
import subprocess

from property_parser import Property
from BSP import BSP, BSP_LUMPS
import utils

LOGGER = utils.init_logging('bee2/VRAD.log')

CONF = Property('Config', [])
SCREENSHOT_DIR = os.path.join(
    '..',
    'portal2',  # This is hardcoded into P2, it won't change for mods.
    'puzzles',
    # Then the <random numbers> folder
)
# Locations of resources we need to pack
RES_ROOT = [
    os.path.join('..', loc)
    for loc in
    ('bee2', 'bee2_dev', 'portal2_dlc2')
]
Example 25
"""Backup and restore P2C maps.

"""
import utils
if __name__ == '__main__':
    if utils.MAC or utils.LINUX:
        # Change directory to the location of the executable
        # Otherwise we can't find our files!
        # The Windows executable does this automatically.
        import os
        import sys
        os.chdir(os.path.dirname(sys.argv[0]))

    utils.init_logging('../logs/backup.log')


import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox

from tk_tools import TK_ROOT

from datetime import datetime
from io import BytesIO
from codecs import EncodedFile
import time
import os
import shutil
import string
import atexit
Example 26
def main():

    init_logging()
    logger = logging.getLogger("main")
    parser = init_argparse()
    args = parser.parse_args()

    if args.pdf:
        data = args.pdf.split(",")
        RCO_DL.makepdfandclean((data[0], data[1]), 0, logger)
        sys.exit()

    if args.check:
        data = args.check.split(",")
        logger.info(f"Checking {data[0]}:{data[1]}")
        RCO_DL.sanitise(data[0], data[1])
        sys.exit()

    if args.checkall:
        list_data = args.checkall.split(",")
        logger.info(list_data)
        with ThreadPoolExecutor(thread_name_prefix="checkall",
                                max_workers=10) as executor:
            futures = [
                # submit the callable and its argument; calling it inline would run it synchronously
                executor.submit(RCO_DL.check_all, data_n)
                for data_n in list_data
            ]
            #wait(futures, return_when=ALL_COMPLETED)
        sys.exit()

    else:

        try:

            series_list = []

            if args.getnew:
                series_list = get_list_ongoing_series()

                #main_url = args.series
            series_list += args.collection

            logger.info(
                f"Will fetch numbers not in local storage yet for the series: {series_list}"
            )

            n_workers = args.workers

            #if not main_url.startswith("https"): main_url = "https://readcomiconline.to/Comic/" + main_url
            rco_dl = RCO_DL(args)

            #issues_links = rco_dl.get_issues_links(main_url, cache=args.cache)
            issues_links = []

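            # feed the series into a work queue; one "KILL" sentinel per worker signals shutdown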
            queue_series = Queue()
            for serie in series_list:
                queue_series.put(serie)
            for _ in range(n_workers):
                queue_series.put("KILL")

            #n_workers = min(args.threads, len(series_list))

            with ThreadPoolExecutor(thread_name_prefix="get_issues",
                                    max_workers=n_workers) as executor:
                futures = [
                    executor.submit(rco_dl.get_issues_links, i, queue_series)
                    for i in range(n_workers)
                ]

                done, pending = wait(futures, return_when=ALL_COMPLETED)

            for d in done:
                try:
                    d.result()
                except Exception as e:
                    lines = traceback.format_exception(*sys.exc_info())
                    logger.error(f"{repr(e)}/n{'!!'.join(lines)}")

            if not (issues_links := rco_dl.info_dict.get('issues_links')):
                sys.exit("No se han encontrado ejemplares del cómic")

            logger.debug(issues_links)
            # skip = args.skip
            # if (skip != 0):
            #     issues_links = issues_links[skip:]

            # #subset of consecutive issues
            # first = args.first
            # last = args.last
            # if ((first != 0) and (last != 0)):
            #     issues_links = issues_links[first-1:last]

            # #single issue
            # if args.issue:
            #     if len(series_list) > 1: sys.exit("Se encesita una sóla serie")
            #     if series_list[0].startswith("https"):
            #         comic_name, _ = rco_dl.get_comic_and_issue_name(series_list[0])
            #     else: comic_name = series_list[0]
            #     issues_links = [rco_dl.info_dict['issues_links'].get(comic_name).get(args.issue)]

            #if issues_links:

            try:

                aiorun.run(rco_dl.run(), use_uvloop=True)

            except Exception as e:
                logger.warning(f"Fail in run  {str(e)}")

        except Exception as e:
            logger.warning(str(e), exc_info=True)
Example 27
    sel_frame.columnconfigure(1, weight=1)

    utils.add_mousewheel(text_box, window, sel_frame, button_frame)

    if utils.USE_SIZEGRIP:
        ttk.Sizegrip(button_frame).grid(row=0, column=3)

    if start_open:
        window.deiconify()
        window.lift()
    else:
        window.withdraw()


if __name__ == '__main__':
    utils.init_logging()
    init(True, log_level=logging.DEBUG)

    # Generate a bunch of log messages to test the window.
    def errors():
        # Use a generator to easily run these functions with a delay.
        yield LOGGER.info('Info Message')
        yield LOGGER.critical('Critical Message')
        yield LOGGER.warning('Warning')

        try:
            raise ValueError('An error')
        except ValueError:
            yield LOGGER.exception('Error message')

        yield LOGGER.warning('Post-Exception warning')
Example 28
# get access to telephony & web database
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from rootio.extensions import db

telephony_server = Flask("ResponseServer")
telephony_server.debug = True

from rootio.telephony.models import *
from rootio.radio.models import *

telephony_server.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
db = SQLAlchemy(telephony_server)


logger = init_logging('news_report')

# redis is used for flagging is_master if program is across multiple stations
r = redis.StrictRedis(host='localhost', port=6379, db=0)

class News(StateMachine):
    initial_state = 'setup'

    def __init__(self, episode_id, station_id):
        self.caller_list = "caller_list-{0}".format(episode_id)
        self.sound_url = "{}{}{}{}".format(TELEPHONY_SERVER_IP,'/~csik/sounds/programs/',episode_id,'/current.mp3')
        self.conference = "plivo" #"news_report_conference-{}".format(episode_id)
        testme = db.session.query(Station).filter(Station.id == station_id).first()
	logger.info("testme : {}    type : {}".format(testme, type(testme)))
        self.station = testme
	self.episode_id = episode_id
Example 29
from zmq.eventloop import ioloop, zmqstream
ioloop.install()
MESSAGE_QUEUE_PORT_WEB = ZMQ_FORWARDER_SPITS_OUT

# get access to telephony & web database
telephony_server = Flask("ResponseServer")
telephony_server.debug = True

from rootio.telephony.models import *
from rootio.radio.models import *

telephony_server.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
db = SQLAlchemy(telephony_server)

logger = init_logging('station_daemon')

# Daemon class
class StationDaemon(Station):
    def __init__(self, station_id):
        logger.info("Hello World")
        self.gateway = 'sofia/gateway/utl'
        self.caller_queue = []
        self.active_workers = []
        try:
            self.station = db.session.query(Station).filter(Station.id == station_id).one()
        except Exception as e:
            logger.error('Could not load one unique station', exc_info=True)
        logger.info("Initializing station: {}".format(self.station.name))
        # This is for UTL outgoing ONLY.  Should be moved to a utility just for the gateway, or such.
        try:
Example 30
def run_tagger_and_writeout(tagger, dev_data):
    logging.debug("Tagging testing data with the trained tagger.")
    for words, chars, _, _ in dev_data.seqs:

        y_hat = tagger.tag_single_sentence(words, chars)
        y_hat_str = [tagger.tagset.rev(tag_id) for tag_id in y_hat]

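        # emit one CoNLL-like line per token: index, word form, and predicted tag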
        for i, (word, utag) in enumerate(zip(words, y_hat_str)):
            print "{}\t{}\t_\t{}\t_\t_\t_\t_\t_\t_".format(i + 1, dev_data.vocab.rev(word), utag)
        print ""
    logging.debug("Testing data tagged.")


if __name__ == '__main__':
    import utils
    utils.init_logging('Tagger')

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('training_file',
                        help='Training file.')
    parser.add_argument('--training-dir',
                        help='Training directory where logs and models will be saved.')
    parser.add_argument('--load-model',
                        help='Model filename.')
    parser.add_argument('--batch-size', default=50, type=int,
                        help='Batch size.')
    parser.add_argument('--eval-interval', default=100, type=int,
                        help='Evaluate tagger every specified number of batches.')
    parser.add_argument('--oov-sampling-p', default=0.0, type=float,
Example 31
        next(os.path.dirname(path) for path in (args[k] for k in ('gelfile', 'yamlfile', 'annotationsfile') if k in args)
             if path and os.path.isabs(path))
    except StopIteration:
        logger.debug("No absolute directory found, using cwd.")
        return os.getcwd()

def set_workdir(args):
    """ Change working directory to match args, where args is gelfile or args dict. """
    if isinstance(args, basestring):
        d = os.path.dirname(args)
    else:
        d = get_workdir(args)
    logger.info("Chainging dir: %s", d)
    os.chdir(d)



if __name__ == '__main__':
    logger.setLevel(logging.DEBUG)
    init_logging()
    # test:
    testing = False
    if testing:
        ap = make_parser()
        argns = ap.parse_args('RS323_Agarose_ScaffoldPrep_550V.gel'.split())
    else:
        argns = parseargs('gui')
    cmdlineargs = argns.__dict__
    # set_workdir(args) Not needed, done by app during set_gelfilepath
    main(cmdlineargs)
Example 32
from config import *


telephony_server = Flask("ResponseServer")
telephony_server.debug = True

admin = Admin(telephony_server)

telephony_server.config['SECRET_KEY'] = SECRET_KEY

#prep the socket type, address for zmq
telephony_server.config['ZMQ_SOCKET_TYPE'] = zmq.PUB
telephony_server.config['ZMQ_BIND_ADDR'] = ZMQ_FORWARDER_SPITS_OUT

logger = init_logging('telephony_server')

from rootio.extensions import db  # expects symlink of rootio in own directory
telephony_server.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
db = SQLAlchemy(telephony_server)

from rootio.telephony.models import *
from rootio.radio.models import *

admin.add_view(ModelView(PhoneNumber, db.session))
admin.add_view(ModelView(Message, db.session))
admin.add_view(ModelView(Call, db.session))
admin.add_view(ModelView(Person, db.session))
admin.add_view(ModelView(Location, db.session))
admin.add_view(ModelView(Station, db.session))
admin.add_view(ModelView(Program, db.session))
Example 33
                if len(prefix) > 1 and ''.join(filter(lambda c: c.isalpha(), prefix)).islower():
                    for emote in pending_emotes:
                        update_emote(cursor, emote['code'], emote['image_id'])
        else:
            for code in data['emotes']:
                update_emote(cursor, code, data['emotes'][code]['image_id'])

    cursor.close()

    return True

if __name__ == "__main__":
    import sys
    sys.path.append('../')
    from utils import load_config, init_logging
    init_logging('pajbot')
    from apiwrappers import APIBase
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', '-c',
                        required=True,
                        help='Specify which config file to use '
                                '(default: config.ini)')

    args = parser.parse_args()
    config = load_config(args.config)

    sqlconn = pymysql.connect(unix_socket=config['sql']['unix_socket'], user=config['sql']['user'], passwd=config['sql']['passwd'], db=config['sql']['db'], charset='utf8')

    cursor = sqlconn.cursor()
Example 34
def init(config_file=None):
    utils.init_logging()
    logger.info('Monobox aggregator server starting up')

    config.init(config_file)
    database.init(config.get('common', 'database_uri'))