Example #1
def test_logger_invalid_message_debug(capfd, test_tools, logger_layer):
    root.setLevel(DEBUG)
    valid_input_event = test_tools.assert_logger_default_input()
    logger = logger_layer.get_logger(valid_input_event)

    logger.debug(None)
    test_tools.assert_logger_none(capfd)
Example #2
def test_logger_invalid_message_error(capfd, test_tools, logger_layer):
    root.setLevel(ERROR)
    valid_input_event = test_tools.assert_logger_default_input()
    logger = logger_layer.get_logger(valid_input_event)

    logger.error(None)
    test_tools.assert_logger_none(capfd)
Example #3
 def run(self):
     self.setup_server()
     self.setup_credentials()
     if self.get_option('version'):
         self.args = ["version"]
     if self.get_option('debug'):
         root.setLevel(DEBUG)
Example #4
def init_log(logfile="log.log", level="INFO", server_addr=None):
    if len(root.handlers) == 0:
        # the root logger records everything; each handler filters for its destination
        root.setLevel(0)
        fmt = "%(asctime)s %(name)s,line:%(lineno)d [%(levelname)s] %(message)s"
        fmter = Formatter(fmt=fmt)
        # display on screen
        s_handler = StreamHandler()
        s_handler.setLevel(level)
        s_handler.setFormatter(fmter)
        root.addHandler(s_handler)
        # write all levels to logfile
        f_handler = FileHandler(logfile)
        # f_handler.setLevel(0)
        f_handler.setFormatter(fmter)
        root.addHandler(f_handler)

        # TCP handler
        if server_addr is not None:
            t_handler = SocketHandler(*server_addr)
            # t_handler.setLevel(0)
            t_handler.setFormatter(fmter)
            root.addHandler(t_handler)
    else:
        raise RuntimeError("init_debug() can only call once.")
Example #5
def cli(config, debug):
    # Load site & configuration.
    os.environ['VLNA_SETTINGS'] = realpath(config)
    from vlna.site import site

    # Enable debugging if specified on the command line.
    site.config['DEBUG'] = site.config['DEBUG'] or debug

    # Get relevant configuration options.
    debug = site.config.get('DEBUG', False)
    host = site.config.get('HOST', '::')
    port = int(site.config.get('PORT', 5000))

    # Set up the logging.
    level = DEBUG if debug else INFO
    handler = StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(Formatter('%(levelname)s: [%(name)s] %(message)s'))
    root.addHandler(handler)
    root.setLevel(level)

    # Dump current configuration to the log.
    log.debug('Configuration:')
    for key, value in sorted(site.config.items()):
        log.debug('  %s = %r', key, value)

    # Prepare WSGI handler for the web site.
    handler = WSGIHandler(site)
    app = Application(debug=debug)
    app.router.add_route('*', '/{path_info:.*}', handler)

    # Run the web server / asyncio loop.
    run_app(app, host=host, port=port)
Example #6
def test_logger_invalid_message_invalid_payload_debug(capfd, test_tools,
                                                      logger_layer):
    root.setLevel(DEBUG)
    valid_input_event = test_tools.assert_logger_default_input()
    logger = logger_layer.get_logger(valid_input_event)

    logger.debug(None, payload={'key': 'this is so wrong'})
    test_tools.assert_logger_none(capfd)
Example #7
def test_logger_valid_message_debug(capfd, test_tools, logger_layer):
    root.setLevel(DEBUG)
    valid_input_event = test_tools.assert_logger_default_input()
    logger = logger_layer.get_logger(valid_input_event)

    message = 'Something happened'
    logger.debug(message)
    test_tools.assert_logger_stdout(capfd, message)
Example #8
def test_logger_valid_message_error(capfd, test_tools, logger_layer):
    root.setLevel(ERROR)
    valid_input_event = test_tools.assert_logger_default_input()
    logger = logger_layer.get_logger(valid_input_event)

    message = 'Something wrong happened'
    logger.error(message)
    test_tools.assert_logger_stdout(capfd, message)
Example #9
def set_level(level: str) -> None:
    """
    Sets the log level.

    Args:
        level: Log level.
    """
    root.setLevel(level)
Example #10
def test_logger_valid_message_invalid_payload_debug(capfd, test_tools,
                                                    logger_layer):
    root.setLevel(DEBUG)
    valid_input_event = test_tools.assert_logger_default_input()
    logger = logger_layer.get_logger(valid_input_event)

    message = 'Something wrong happened'
    logger.debug(message, payload={'key': 'this is so wrong'})
    test_tools.assert_logger_stdout(capfd, message)
Example #11
    def set_level(cls, level):
        """
        :raises: ValueError
        """
        level = (level if not isinstance(level, str) else int(
            LOGGING_LEVELS.get(level.upper(), level)))

        for handler in root.handlers:
            handler.setLevel(level)

        root.setLevel(level)
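
LOGGING_LEVELS here is defined elsewhere in that module; a plausible sketch of the normalization, assuming a stdlib-style name-to-number mapping and a hypothetical owning class MyLogger:

# hypothetical mapping, mirroring the standard logging numbers
LOGGING_LEVELS = {"DEBUG": 10, "INFO": 20, "WARNING": 30, "ERROR": 40}

MyLogger.set_level("debug")   # "DEBUG" is in the mapping        -> 10
MyLogger.set_level("15")      # unknown name, so int("15")       -> 15
MyLogger.set_level(40)        # non-str values pass through      -> 40
MyLogger.set_level("nope")    # int("nope") raises the documented ValueError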
Example #12
def getLogger(name):
    global handler
    if not os.path.exists(LOGDIR):
        os.mkdir(LOGDIR)
    if handler is None:
        path = os.path.join(LOGDIR, LOGFILE)
        handler = RotatingFileHandler(path, maxBytes=0x100000, backupCount=5)
        handler.setFormatter(Formatter(FMT))
        root.setLevel(logging.INFO)
        root.addHandler(handler)
    log = logging.getLogger(name)
    return log
Example #13
def getLogger(name=NAME):
    global handler
    if not os.path.exists(LOGDIR):
        os.mkdir(LOGDIR)
    if handler is None:
        path = os.path.join(LOGDIR, LOGFILE)
        handler = RotatingFileHandler(path, maxBytes=0x100000, backupCount=5)
        handler.setFormatter(Formatter(FMT))
        root.setLevel(logging.INFO)
        root.addHandler(handler)
    log = logging.getLogger(name)
    return log
Example #14
def configure_root_logger(silent, verbose):
    root_logger.addHandler(IGVMLogHandler())

    # We sum the silent and verbose counts here.  Using both at once is not
    # really meaningful, but raising an error would be no better.  See the
    # Python logging library documentation [1] for the levels.
    # Paramiko is overly verbose, so we configure it one level higher.
    #
    # [1] https://docs.python.org/library/logging.html#logging-levels
    level = 20 + (silent - verbose) * 10
    root_logger.setLevel(level)
    root_logger.getChild('paramiko').setLevel(level + 10)
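
Since the standard levels sit 10 apart (DEBUG=10, INFO=20, WARNING=30), each repeat of one flag shifts the root logger by exactly one level; a few worked values of the formula above, with hypothetical flag counts:

# level = 20 + (silent - verbose) * 10
assert 20 + (0 - 0) * 10 == 20  # defaults      -> INFO
assert 20 + (0 - 1) * 10 == 10  # one --verbose -> DEBUG
assert 20 + (1 - 0) * 10 == 30  # one --silent  -> WARNING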
Example #15
def getLogger(name):
    global handler
    logdir = __logdir()
    if not os.path.exists(logdir):
        os.mkdir(logdir)
    if handler is None:
        path = logfile()
        handler = RotatingFileHandler(path, maxBytes=0x100000, backupCount=5)
        handler.setFormatter(Formatter(FMT))
        root.setLevel(logging.INFO)
        root.addHandler(handler)
    log = logging.getLogger(name)
    return log
Example #16
def log_collection_context(logging_memory_handler, tempdir, prefix, optional_archive_path=None):
    from logging import root, DEBUG
    path = get_tar_path(prefix, optional_archive_path)
    root.addHandler(logging_memory_handler)
    root.setLevel(DEBUG)
    try:
        yield path
    finally:
        with open_archive(path) as archive:
            logging_memory_handler.flush()
            logging_memory_handler.close()
            add_directory(archive, tempdir)
            print("Logs collected successfully to {!r}".format(path))
Example #17
def move_iteration(step: int, play_map: Dict[str, Any], action: Value, amount_of_moves: Value, move_list: List[str]):
    play_map = simplify_game_data(play_map)
    basicConfig()
    root.setLevel(INFO)
    info("Task started.")
    processes = []
    with amount_of_moves.get_lock():
        is_not_6th_step = amount_of_moves.value % 6 != 0
    result = {}
    for move in move_list:
        tmp_map = deepcopy(play_map)
        tmp_map = _calculate_move(0, move, tmp_map, is_not_6th_step)
        death_count = Value("i", 0)
        kill_count = Value("i", 0)
        x_coif = tmp_map.width - tmp_map.players[0].x
        y_coif = tmp_map.height - tmp_map.players[0].y
        if len(tmp_map.players) == 1:
            if not tmp_map.players[0].surviving:
                with death_count.get_lock():
                    death_count.value += 1
        else:
            p = Process(target=_test_all_options, args=(1, death_count, kill_count, tmp_map, is_not_6th_step,
                                                        move_list))
            processes.append(p)
            p.start()
        result[move] = [death_count, kill_count, x_coif, y_coif]

    for process in processes:
        process.join()

    next_action = 0
    for i in range(1, len(move_list)):
        with result[move_list[i]][0].get_lock(), result[move_list[i]][1].get_lock():  # hold both locks; 'and' would enter only the second
            if result[move_list[i]][0].value < result[move_list[next_action]][0].value:
                next_action = i
            elif result[move_list[i]][0].value == result[move_list[next_action]][0].value and \
                    result[move_list[i]][1].value > result[move_list[next_action]][1].value:
                next_action = i
    with amount_of_moves.get_lock():
        if amount_of_moves.value == step:
            with action.get_lock():
                action.value = next_action
            info("manuel_calculation finished for move " + str(amount_of_moves.value))
            info("Answer decided to set to " + str(move_list[next_action]))
        else:
            info("manuel_calculation at move " + str(amount_of_moves.value) + " finished too late")
Example #18
def getLogger(name):
    global handler
    logdir = __logdir()
    if not os.path.exists(logdir):
        os.mkdir(logdir)
    if handler is None:
        try:
            level = int(os.environ["KATELLO_CLI_LOGLEVEL"])
        except (KeyError, ValueError):
            level = logging.INFO
        path = logfile()
        handler = RotatingFileHandler(path, maxBytes=0x100000, backupCount=5)
        handler.setFormatter(Formatter(FMT))
        root.setLevel(level)
        root.addHandler(handler)
    log = logging.getLogger(name)
    return log
Example #19
def log_collection_context(logging_memory_handler,
                           tempdir,
                           prefix,
                           timestamp,
                           output_path=None,
                           creation_dir=None):
    from logging import root, DEBUG
    path = get_tar_path(prefix, output_path, timestamp, creation_dir)
    root.addHandler(logging_memory_handler)
    root.setLevel(DEBUG)
    try:
        yield path
    finally:
        with open_archive(path) as archive:
            root.removeHandler(logging_memory_handler)
            logging_memory_handler.flush()
            logging_memory_handler.close()
            add_directory(archive, tempdir)
            print("Logs collected successfully to {}".format(path))
Example #20
def basicConfig(**kwargs):
    if len(root.handlers) == 0:
        if 'program_name' in kwargs:
            setProgramName(kwargs['program_name'])
        if 'filename' in kwargs:
            setLogFilename(kwargs['filename'])
        filename = _logFilename
        if filename:
            mode = kwargs.get("filemode", "a")
            hdlr = FileHandler(filename, mode)
        else:
            stream = kwargs.get("stream")
            hdlr = StreamHandler(stream)
        fs = kwargs.get("format", BASIC_FORMAT)
        dfs = kwargs.get("datefmt", None)
        fmt = SilkscreenFormatter(fs, dfs)
        hdlr.setFormatter(fmt)
        root.addHandler(hdlr)
        level = kwargs.get("level")
        if level:
            root.setLevel(level)
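
A hedged call sketch for this basicConfig variant; program_name and filename are the extensions it adds over the stdlib version, and setProgramName, setLogFilename, and SilkscreenFormatter come from the surrounding module:

# hypothetical values; takes effect only while root has no handlers yet
basicConfig(
    program_name="silkscreen",
    filename="silkscreen.log",
    filemode="w",
    level="INFO",
)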
Example #21
def main(options):
    basicConfig()
    root.setLevel(verbose_levels.get(options.verbose, ERROR))

    rec_msgs = {}
    unrec_msgs = {}

    handler = make_msg_counter(rec_msgs, unrec_msgs)

    ## make_msg_counter fills in the defaults for the rec_msgs dict; now we can
    ## print those values and exit if the option is given
    if options.printmsgs:
        for name in sorted(k.typeName for k in rec_msgs.keys()):
            print(name)
        return

    ## if we're still here, we should connect
    con = ibConnection(options.host, options.port, options.clientid)
    con.registerAll(handler)
    con.register(save_order_id, 'NextValidId')
    con.register(save_tick, 'TickSize', 'TickPrice')
    con.connect()
    short_sleep()

    ## and if we've connected, we should execute all of the test functions in
    ## the module namespace.
    calls = [v for k, v in globals().items() if k.startswith('test_')]
    for call in sorted(calls):
        call = maybe_verbose(catch_errors(call))
        errors = call(con, options)
        for err in errors:
            error_msgs[err] = call.__name__

    type_count = len(rec_msgs)
    seen_items = rec_msgs.items()
    seen = [(k, v) for k, v in seen_items if v]
    unseen = [(k, v) for k, v in seen_items if not v]

    ## adjust the showmsgs option if given --show=all
    alls = [v for v in options.showmsgs if 'all' in v.lower()]
    if any(alls):
        _, count = name_count(alls[0])  # avoid shadowing the all() builtin
        options.showmsgs = [
            '%s:%s' % (k.typeName, count) for k in rec_msgs.keys()
        ]

    ## ready, set, print!
    for msg_typename in options.showmsgs:
        msg_typename, msg_showmax = name_count(msg_typename)
        formatter = msg_formatters.get(msg_typename, msg_formatters['default'])
        msgs = [v for k, v in seen_items if k.typeName == msg_typename]
        if msgs:
            msgs = msgs[0]
            if not msg_showmax or msg_showmax > len(msgs):
                msg_showmax = len(msgs)
            print('\n%s (%s of %s):' % (
                msg_typename,
                msg_showmax,
                len(msgs),
            ))
            for msg in msgs[0:msg_showmax]:
                print(formatter(msg))
        else:
            if msg_typename in [k.typeName for k in rec_msgs.keys()]:
                print('\n%s (%s):' % (
                    msg_typename,
                    0,
                ))
            else:
                print('\nMessage type %s not recognized' % (msg_typename, ))
    ## but wait, there's more!  here we print a summary of seen message
    ## types and associated counts.
    if seen:
        print('\nSeen Message Types (count):')
        for cls, seq in sorted(seen):
            print('    %s (%s)' % (
                cls.__name__,
                len(seq),
            ))
    else:
        print('\nTotal failure; no messages received.')
    ## but wait, there's more!  here we print a summary of unseen message
    ## types and associated counts.
    if unseen:
        print('\nUnseen Message Types (help):')
        for cls, zero in sorted(unseen):
            name = cls.__name__
            help = unseen_hints.get(name, '')
            print('    %s%s' % (
                name,
                ' (%s)' % help if help else '',
            ))
    else:
        print('\nAll Message types received.')
    ## last but not least we print the seen and unseen totals, and their ratio
    print('\nSummary:')
    args = (type_count, len(seen), len(unseen),
            100 * len(seen) / float(type_count))
    print('   total:%s  seen:%s  unseen:%s  coverage:%2.2f%%' % args)
Example #22
    def calc_fitness(self):
        return -self.navigator.get_length(self)
        # return self.navigator.get_time(self) + self.navigator.get_length(self)

    def fitness(self) -> float:
        return self._fitness


def test_module():
    gs = GeneticSolver(gene_type=OptimizableTour, N=200, mutation_rate=0.2)

    places = gs.population[0].places
    xs = [p.x for p in places]
    ys = [p.y for p in places]  # was p.x, an apparent copy-paste slip

    print(xs)
    print(ys)
    solver = TSPSolver.from_data(xs=xs, ys=ys, norm="EUC_2D")
    solution = solver.solve()
    print(solution.optimal_value)
    print(solution.tour)

    for i in range(1000):
        gs.evolve()
        ff = [f.fitness() for f in gs.population]
        print(ff)


root.setLevel(logging.INFO)
test_module()
Example #23
import os
import logging
from logging import root
import random
from tempfile import NamedTemporaryFile as NTF, mkdtemp
import glob
from io import StringIO
from subprocess import Popen, PIPE, STDOUT,check_call
import re
from time import time
from stat import ST_SIZE
from traceback import print_exc
from eutils import OS_Runner, STORED, DISCARDED, getConfig,gen_subdb, time_function
from eutils.sdfiterator import sdf_iter
from eutils.coord import CoordinateSolver
from eutils.lshsearch import LSHSearcher


root.setLevel(logging.WARNING)
os_run = OS_Runner()

BINDIR = "" 
BASEDIR = "."
DATADIR = "data"
DB2DB_DISTANCE = os.path.join(BINDIR, "ei-db2db_distance")
DB_SUBSET = os.path.join(BINDIR,"ei-db_subset")
DB_BUILDER = os.path.join(BINDIR,"ei-db_builder")
EUCSEARCHTOOL = os.path.join(BINDIR, "ei-euclid_search")
EVALUATOR = os.path.join(BINDIR, "ei-evaluator")
COORD_TO_BINARY = os.path.join(BINDIR, "ei-bin_formatter")
INDEXED_SEARCH_EVALUATOR = os.path.join(BINDIR, "ei-comparesearch")
COORDSERVER = os.path.join(BINDIR, "ei-coord_server")
SINGLE_SEARCH = os.path.join(BINDIR,"ei-single_search")
K = 200
Example #24
def invoke(args: Optional[List[str]] = None) -> int:
    """
    Entrypoint for `boringmd` via a command line interface.

    Arguments:
        args: Optional arguments. Will read from the command line if omitted.

    Returns:
        Shell exit code.
    """

    basicConfig(format="%(message)s")
    logger = getLogger("boringmd")

    arg_parser = ArgumentParser(
        "boringmd",
        description=
        "Extracts boring plain text and front matter from Markdown.",
        epilog=("Made with \u2764 by Cariad Eccleston: " +
                "https://github.com/cariad/boringmd • " + "https://cariad.io"),
    )

    arg_parser.add_argument(
        "markdown",
        help="Path to Markdown file",
        metavar="PATH",
        nargs="?",
    )

    arg_parser.add_argument(
        "--front-matter",
        action="store_true",
        help="print front matter only",
    )

    arg_parser.add_argument(
        "--version",
        action="store_true",
        help="print version",
    )

    arg_parser.add_argument(
        "--log-level",
        default="INFO",
        help="log level",
        metavar="LEVEL",
    )

    parsed_args = arg_parser.parse_args(args)
    root.setLevel(parsed_args.log_level.upper())

    try:
        if parsed_args.version:
            print(get_version())
            return 0

        if not parsed_args.markdown:
            logger.error("Path to Markdown file is required.")
            return 3

        if parsed_args.front_matter:
            print(front_matter_from_file(parsed_args.markdown))
        else:
            print(text_from_file(parsed_args.markdown))
        return 0

    except FileNotFoundError as ex:
        logger.error('"%s" not found.', ex.filename)
        return 2
    except Exception as ex:
        logger.exception(ex)
        return 1
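
A usage sketch for invoke (the Markdown path is hypothetical; it assumes boringmd's front_matter_from_file and text_from_file are importable as above):

# hypothetical invocation, mirroring: boringmd --front-matter notes.md
exit_code = invoke(["--front-matter", "notes.md"])
print("exit code:", exit_code)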
Example #25
'''
image processing
'''
import os
import sys
from logging import getLogger, basicConfig, root
from PIL import Image

# set logger
logger = getLogger(__name__)
root.setLevel(level='INFO')
basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')


def load_files(file_path):
    '''
    load files
    '''
    files = os.listdir(file_path)
    return files


def processing_image(img, mode='gray', rotate=0):
    '''
    image processing
    '''
    fix_img = img

    # gray-scale
    if mode == 'gray':
        fix_img = img.convert('L')
Example #26
    def __init__(self):
        super(InputParser, self).__init__()

        # get the parsing done here
        parser = ArgumentParser()

        parser.add_argument("bams",
                            help="the aligned bam file (needs index)",
                            metavar="BAM",
                            nargs="+")
        parser.add_argument(
            "-T",
            "--reference",
            help="the reference to use (only required for cram input)",
        )
        parser.add_argument("-b",
                            "--blackList",
                            help="the black list bed file to filter")
        parser.add_argument("-w",
                            "--whiteList",
                            help="the white list bed file to filter")
        parser.add_argument("-g",
                            "--germline",
                            help="source of germline variants to filter")
        parser.add_argument(
            "-Q",
            "--baseQuality",
            help=
            "min base quality required to be counted as a variant [default: %(default)s]",
            type=int,
            default=25,
        )
        parser.add_argument(
            "--minAverageBaseQuality",
            help=
            "min average base quality through the read to consider it in analysis [default: %(default)s]",
            type=int,
            default=20,
        )
        parser.add_argument(
            "-q",
            "--mappingQuality",
            help=
            "min mapping quality required for a read to be included [default: %(default)s]",
            type=int,
            default=37,
        )
        parser.add_argument(
            "--maxMisMatchesPerRead",
            help=
            "maximum mismatches a read is allowed before it is discarded [default: %(default)s]",
            type=int,
            default=7,
        )
        parser.add_argument(
            "--minMisMatchesPerRead",
            help=
            "minimum mismatches a read needs before it is analysed [default: %(default)s]",
            type=int,
            default=1,
        )
        parser.add_argument(
            "--maxMisMatchesPerFragment",
            help=
            "maximum mismatches a pair of reads is allowed before it is discarded [default: %(default)s]",
            type=int,
            default=7,
        )
        parser.add_argument(
            "--minMisMatchesPerFragment",
            help=
            "minimum mismatches a pair of read needs before it is analysed [default: %(default)s]",
            type=int,
            default=1,
        )
        parser.add_argument(
            "--maxFragmentLengths",
            help=
            "comma seperated list of maximum fragment lengths for a read to be considered for analysis [default: %(default)s] -1 corresponds to unlimited; will be checked with --minFragmentLengths for proper non overlapping intervals",
            type=str,
            default="144,325",
        )
        parser.add_argument(
            "--minFragmentLengths",
            help=
            "comma seperated list of minimum fragment lengths for a read to be considered for analysis [default: %(default)s]; will be checked with --maxFragmentLengths for proper non overlapping intervals",
            type=str,
            default="74,240",
        )
        parser.add_argument(
            "--onlyOverlap",
            help=
            "only consider the overlapping part of the paired end fragment [default: %(default)s]",
            action="store_true",
        )
        parser.set_defaults(onlyOverlap=False)
        parser.add_argument(
            "--strictOverlap",
            help=
            "when enabled and the reads do not agree in the overlap, the mismatch will be discarded [default: %(default)s]",
            action="store_true",
        )
        parser.set_defaults(strictOverlap=False)

        parser.add_argument(
            "-t",
            "--threads",
            help=
            "Amount of additional threads to use for decompression of the bam",
            type=int,
            default=1,
        )

        # parser.add_argument(
        #     "-O",
        #     "--outputType",
        #     help="the format to output the result",
        #     choices=["json", "R"],
        #     default="json",
        # )

        parser.add_argument(
            "-o",
            "--outFileRoot",
            help="the file to write output to (will be created if not present)",
            required=True,
            default=None,
        )
        parser.add_argument(
            "--overwrite",
            help="wether to overwrite previous results [default: %(default)s]",
            action="store_true",
        )
        parser.set_defaults(overwrite=False)
        parser.add_argument(
            "--writeEvidenceBam",
            help=
            "wether to output the reads, which were used to calculate the mismatches as a bam [default: %(default)s]",
            action="store_true",
        )
        parser.set_defaults(writeEvidenceBam=False)
        parser.add_argument(
            "--writeEvidenceAsReadPairs",
            help=
            "wether to output both reads, which were used to calculate the mismatches when writing the evidence bam [default: %(default)s]",
            action="store_true",
        )
        parser.set_defaults(writeEvidenceAsReadPairs=True)

        parser.add_argument(
            "-v",
            "--verbosity",
            help="Level of messages to be displayed",
            choices=["debug", "info", "error"],
            default="info",
        )

        parser.add_argument(
            "-m",
            "--method",
            help=
            "The method to use to deconstruct signatures QP: Quadratic Programming ILM: Iterative Linear [default: %(default)s]",
            choices=["QP", "ILM"],
            default="QP",
        )

        parser.add_argument(
            "--germlineAFCutOff",
            help=
            "minimum allele frequency for a variant in the germline resource to be used to filter [default: %(default)s]",
            type=restricted_float,
            default=0,
        )

        parser.add_argument(
            "--germlineRequirePass",
            help=
            "Flag wheter the germline variant needs to be a PASS filter value to be used for filtering",
            action="store_true",
        )
        # we set this to false, because an issue in the germline calling can hint at a problem we
        # might have with the detection as well
        parser.set_defaults(germlineRequirePass=False)

        parser.add_argument(
            "--normaliseCounts",
            help=
            "Flag to enable normalisation based on the occurrences of the (di/tri)-nucleotides",
            action="store_true",
        )
        # we set this to false, because an issue in the germline calling can hint at a problem we
        # might have with the detection as well
        parser.set_defaults(normaliseCounts=False)

        parser.add_argument(
            "--flatNormalisation",
            help=
            "Flag to enable an even normalisation on the number of the contexts in the genome (default is to use the fraction of found contexts vs the total number in the genome)",
            action="store_true",
        )
        # we set this to false, because an issue in the germline calling can hint at a problem we
        # might have with the detection as well
        parser.set_defaults(flatNormalisation=False)

        # parser.add_argument(
        #     "-n",
        #     "--normals",
        #     help="the normal bams that should be used for plots",
        #     nargs="+",
        #     metavar="BAM",
        #     default=[],
        # )

        params = parser.parse_args()

        # set up the logging before we do anything else
        # now we tell people how much they want to know
        root.setLevel(params.verbosity.upper())

        ############################################################################################
        ###                                    sanity check                                      ###
        ############################################################################################

        # find out if we have a reference
        try:
            rFile = Path(params.reference)
            if rFile.is_file():
                self.referenceFile = params.reference
            else:
                self.referenceFile = None
        except TypeError:
            self.referenceFile = None

        # we better check if the bam files actually exist
        self.bamFiles = []
        for bam in params.bams:
            debug(f"Checking alignment input file: {bam}")
            # we might actually waive this exception (maybe with an option) if just one file is not
            # found? then again we don't want to just do half an analysis.
            bam = Path(bam)
            if not bam.is_file():
                raise Exception("Could not find bam file: " + bam)
            else:
                # this will be done multiple times, so you can combine bams and crams in the
                # analysis without any issues
                # we could theoretically do this with just file endings and it would probably be
                # faster, but then we also would need to check for the index
                with AlignmentFile(bam, "r") as tFile:
                    if tFile.is_cram and self.referenceFile is None:
                        raise Exception("CRAMs need a reference")
                self.bamFiles.append(bam)

        # we do the same test for the normals
        self.normals = []
        # for bam in params.normals:
        #     debug(f"Checking alignment input file: {bam}")
        #     # we might actually waive this exception (maybe with an option) if just one file is not
        #     # found? then again we dont want to just do half an analysis.
        #     bam = Path(bam)
        #     if not bam.is_file():
        #         raise Exception(f"Could not find bam file: {bam}")
        #     else:
        #         # this will be done multiple times, so you can combine bams and crams in the
        #         # analysis without any issues
        #         with AlignmentFile(bam, "r") as tFile:
        #             if tFile.is_cram and self.referenceFile is None:
        #                 raise Exception("CRAMs need a reference")
        #         self.normals.append(bam)

        self.blackListFile = None
        # we really only need to check if the file exists, if a file was actually given to us
        if params.blackList is not None:
            debug(f"Checking blacklist input file: {params.blackList}")

            blFile = Path(params.blackList)
            if not blFile.is_file():
                raise Exception(
                    f"Could not find black list bed file: {params.blackList}")
            else:
                self.blackListFile = blFile

        self.whiteListFile = None
        if params.whiteList is not None:
            debug(f"Checking whitelist input file: {params.whiteList}")

            wlFile = Path(params.whiteList)
            if not wlFile.is_file():
                raise Exception(
                    f"Could not find whitelist bed file: {params.whiteList}")
            else:
                self.whiteListFile = wlFile

        self.germlineFile = ""
        # we really only need to check if the file exists, if a file was actually given to us
        if params.germline is not None:
            debug(f"Checking germline input file: {params.germline}")

            glFile = Path(params.germline)
            if not glFile.is_dir():
                raise Exception(
                    f"Could not find germline zarr folder: {params.germline}")
            else:
                self.germlineFile = glFile

        # some info if weird values get specified
        if params.baseQuality < 0:
            error("base quality needs to be a positive integer")
            exit(1)
        elif params.baseQuality < 20:
            info(
                f"Selected low base quality ({params.baseQuality}) might significantly affect results"
            )
        self.minBQ = params.baseQuality

        # same here, if weird values are set, we just tell and continue unless it's completely wrong
        if params.mappingQuality < 0:
            raise Exception("base quality needs to be a positive integer or 0")

        elif params.mappingQuality > 60:
            info(
                "BWA caps mapping quality at 60, there will be no reads to consider if MQ>60"
            )
        self.minMQ = params.mappingQuality

        oFile = None
        if params.outFileRoot is not None:
            # TODO: check here whether the directory allows write access; it would be a shame to run the whole process only to be unable to write the results
            oFile = Path(params.outFileRoot)
        # we also want the None, so we do this outside of the if
        self.outFileRoot = oFile

        # dont need to check anything here, because the parser already does everything for us
        self.verbosity = params.verbosity.upper()
        self.threads = params.threads
        self.method = params.method

        self.minAverageBaseQuality = params.minAverageBaseQuality

        self.maxMisMatchesPerRead = params.maxMisMatchesPerRead
        self.minMisMatchesPerRead = params.minMisMatchesPerRead

        self.maxMisMatchesPerFragment = params.maxMisMatchesPerFragment
        self.minMisMatchesPerFragment = params.minMisMatchesPerFragment

        self.onlyOverlap = params.onlyOverlap
        self.strictOverlap = params.strictOverlap

        # if both strict overlap and only overlap are active, we actually double the
        # base quality, which we require, as the base quality of two agreeing reads
        # gets added up
        # TODO: think if we should add this in here, or if we should just leave it
        # to the user to specify a higher base quality

        self.writeEvidenceBam = params.writeEvidenceBam
        self.writeEvidenceReadPairs = params.writeEvidenceAsReadPairs
        self.overwrite = params.overwrite

        # here we check and parse the fragment length intervals
        minFragList = params.minFragmentLengths.split(",")
        maxFragList = params.maxFragmentLengths.split(",")

        if len(minFragList) != len(maxFragList):
            error(
                "Length of minimum and maximum fragment sizes does not match\n--minFragmentLengths and --maxFragmentLengths need to have the same length"
            )
            exit(1)

        self.fragmentLengthIntervals = []
        for minStr, maxStr in zip(minFragList, maxFragList):  # avoid shadowing the min/max builtins
            try:
                minNum = int(minStr)
                maxNum = int(maxStr)

                if maxNum == -1:
                    maxNum = float("inf")

                if minNum < 0 or minNum >= maxNum:
                    raise Exception("interval bounds must satisfy 0 <= min < max")

                self.fragmentLengthIntervals.append((minNum, maxNum))

            except Exception as e:
                error(
                    f"Specified fragment size interval is not a proper interval min:{minStr} max:{maxStr} ({type(e)})"
                )
                exit(1)

        self.afCutOff = params.germlineAFCutOff
        self.germlineRequirePass = params.germlineRequirePass

        self.normaliseCounts = params.normaliseCounts

        self.flatNormalisation = params.flatNormalisation
Example #27
            self.handleError(record)

    def close(self):
        self.acquire()
        try:
            if self.stream:
                self.flush()
                self.stream.close()
                self.stream = None
                Handler.close(self)
        finally:
            self.release()

    def doRollover(self):
        if self.stream:
            self.stream.close()
            self.stream = None
        self.baseFilename = self._pathnameprefix + '-' + \
            time.strftime(self._dayfmt) + '.log'
        self.rollover_at += SECONDS_PER_DAY


handler = StreamHandler()
fmter = ConsoleFormatter(colorize=colorize)
handler.setFormatter(fmter)
root.addHandler(handler)
loghandler = LogfileHandler(path.expanduser('~/.config/ufit'))
loghandler.setLevel(WARNING)
root.addHandler(loghandler)
root.setLevel(INFO)
Example #28
 def setUp(self):
     """Clear the handlers on the root logger before each test"""
     root.handlers = list()
     root.setLevel(DEBUG)
Example #29
# then run these tests from within the test directory

from __future__ import with_statement
from contextlib import closing
import unittest, os, sys
from cStringIO import StringIO
from logging import root, DEBUG

import tnef
from tnef.config import *
from tnef.errors import *
from tnef.util import temporary

from util import *

root.setLevel(DEBUG)

tmpdir = "tmptestdir"
   
class TestTnefFunctions(unittest.TestCase):
    
   def testHasBody(self):
      f, s = getFiles("body.tnef")
      self.failUnless(tnef.hasBody(f))
      #self.failUnless(tnef.hasBody(s))
      
      f, s = getFiles("multi-name-property.tnef")
      self.failIf(tnef.hasBody(f))
      #self.failIf(tnef.hasBody(s))

   def testHasFiles(self):
Example #30
import os
import logging
from logging import root
import random
from tempfile import NamedTemporaryFile as NTF, mkdtemp
import glob
from io import StringIO
from subprocess import Popen, PIPE, STDOUT, check_call
import re
from time import time
from stat import ST_SIZE
from traceback import print_exc
from eutils import OS_Runner, STORED, DISCARDED, getConfig, gen_subdb, time_function
from eutils.sdfiterator import sdf_iter
from eutils.coord import CoordinateSolver
from eutils.lshsearch import LSHSearcher

root.setLevel(logging.WARNING)
os_run = OS_Runner()

BINDIR = ""
BASEDIR = "."
DATADIR = "data"
DB2DB_DISTANCE = os.path.join(BINDIR, "ei-db2db_distance")
DB_SUBSET = os.path.join(BINDIR, "ei-db_subset")
DB_BUILDER = os.path.join(BINDIR, "ei-db_builder")
EUCSEARCHTOOL = os.path.join(BINDIR, "ei-euclid_search")
EVALUATOR = os.path.join(BINDIR, "ei-evaluator")
COORD_TO_BINARY = os.path.join(BINDIR, "ei-bin_formatter")
INDEXED_SEARCH_EVALUATOR = os.path.join(BINDIR, "ei-comparesearch")
COORDSERVER = os.path.join(BINDIR, "ei-coord_server")
SINGLE_SEARCH = os.path.join(BINDIR, "ei-single_search")
K = 200
Example #31
    '''
    Builds a tree from edges
    '''
    tree = defaultdict(list)
    for _edge in edges:
        tree[_edge.src].append(_edge.dst)
        tree[_edge.dst].append(_edge.src)
    return dict(tree)

def build_a_graph(edges):
    '''
    Builds an undirected graph from connections
    '''
    edge = namedtuple('edge', 'src dst cost')
    graph = defaultdict(list)
    for _edge in edges:
        src = _edge[0]
        dst = _edge[1]
        cost = _edge[2]
        edge1 = edge(src, dst, cost)
        edge2 = edge(dst, src, cost)
        graph[src].append(edge1)
        graph[dst].append(edge2)
    return dict(graph)
        
if __name__ == '__main__':
    root.setLevel(0)
    e = [('a', 'b', 1), ('a', 'c', 4), ('a', 'd', 3), ('b', 'd', 2), ('c', 'd', 5)]
    g = build_a_graph(e)
    print(construct_tree_from_edges(prim(g)))
    
Example #32
import os
from cStringIO import StringIO  # assumed: surrounding code predates Python 3
from string import Template     # assumed source of Template
from PIL import Image
from logging import info, warning, error, debug, critical, root, \
    NOTSET, WARNING, INFO
from django.core.cache import cache
max_resolution = 6

WORK_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                        'working')

# pregenerate a white image

img = Image.new('RGBA', (256, 256), (255, 255, 255, 255))  # channel values cap at 255
storage = StringIO()
img.save(storage, format='png')
white = storage.getvalue()

root.setLevel(WARNING)

# gs command

gs = \
    'gs -dUseCIEColor -dFirstPage=1 -dLastPage=1 -dUseCropBox -dSAFER -dBATCH -dNOPAUSE -r%dx%d -dNOPLATFONTS -sDEVICE=png16m -dBackgroundColor=16#ffffff -dTextAlphaBits=4 -dGraphicsAlphaBits=4 -sOutputFile=%s %s > /dev/null'

R_TMPL = \
    Template("""

check_tree <- function(x, desired, level)
{
  if (attr(x, 'members') == 1) {
    # leaf
    if (attr(x, 'label') == desired) return(list(1, NULL))
    else return(list(0, NULL));