Example #1
    def __init__(self, config_file, gpu='-1', debug=False):
        """Load parameters and set log level.

        Args:
            config_file (str): path to the config file, which should be in ``yaml`` format.
                You can use default config provided in the `Github repo`_, or write it by yourself.
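            gpu (str, optional): id(s) of the GPU(s) to run on; the value is stored in
                ``opt['gpu']``. Defaults to ``'-1'``.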
            debug (bool, optional): whether to enable debug mode during running. Defaults to False.

        .. _Github repo:
            https://github.com/RUCAIBox/CRSLab

        """

        self.opt = self.load_yaml_configs(config_file)
        # gpu
        self.opt['gpu'] = gpu
        # dataset
        dataset = self.opt['dataset']
        tokenize = self.opt['tokenize']
        if isinstance(tokenize, dict):
            tokenize = ', '.join(tokenize.values())
        # model
        model = self.opt.get('model', None)
        rec_model = self.opt.get('rec_model', None)
        conv_model = self.opt.get('conv_model', None)
        policy_model = self.opt.get('policy_model', None)
        if model:
            model_name = model
        else:
            models = []
            if rec_model:
                models.append(rec_model)
            if conv_model:
                models.append(conv_model)
            if policy_model:
                models.append(policy_model)
            model_name = '_'.join(models)
        self.opt['model_name'] = model_name
        # log
        log_name = self.opt.get(
            "log_name", dataset + '_' + model_name + '_' +
            time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())) + ".log"
        if not os.path.exists("log"):
            os.makedirs("log")
        logger.remove()
        if debug:
            level = 'DEBUG'
        else:
            level = 'INFO'
        logger.add(os.path.join("log", log_name), level=level)
        logger.add(lambda msg: tqdm.write(msg, end=''),
                   colorize=True,
                   level=level)

        logger.info(f"[Dataset: {dataset} tokenized in {tokenize}]")
        if model:
            logger.info(f'[Model: {model}]')
        if rec_model:
            logger.info(f'[Recommendation Model: {rec_model}]')
        if conv_model:
            logger.info(f'[Conversation Model: {conv_model}]')
        if policy_model:
            logger.info(f'[Policy Model: {policy_model}]')
        logger.info("[Config]" + '\n' + json.dumps(self.opt, indent=4))
Example #2
def run(args):
    hosts = args.node or DEFAULT_NODES

    if not args.verbose:
        LOG.remove()
        LOG.add(
            sys.stdout,
            format="<green>[{time:HH:mm:ss.SSS}]</green> {message}",
        )
        LOG.disable("infra")
        LOG.disable("ccf")

    LOG.info(
        f"Starting {len(hosts)} CCF node{'s' if len(hosts) > 1 else ''}...")
    if args.enclave_type == "virtual":
        LOG.warning("Virtual mode enabled")

    with infra.network.network(
            hosts=hosts,
            binary_directory=args.binary_dir,
            library_directory=args.library_dir,
            dbg_nodes=args.debug_nodes,
    ) as network:
        if args.recover:
            args.label = args.label + "_recover"
            LOG.info("Recovering network from:")
            LOG.info(f" - Common directory: {args.common_dir}")
            LOG.info(f" - Ledger: {args.ledger_dir}")
            if args.snapshot_dir:
                LOG.info(f" - Snapshots: {args.snapshot_dir}")
            else:
                LOG.warning(
                    "No available snapshot to recover from. Entire transaction history will be replayed."
                )
            network.start_in_recovery(
                args,
                args.ledger_dir,
                snapshot_dir=args.snapshot_dir,
                common_dir=args.common_dir,
            )
            network.recover(args)
        else:
            network.start_and_join(args)

        nodes = network.get_joined_nodes()
        max_len = max([len(str(node.local_node_id)) for node in nodes])

        # To be sure, confirm that the app frontend is open on each node
        for node in nodes:
            with node.client("user0") as c:
                if args.verbose:
                    r = c.get("/app/commit")
                else:
                    r = c.get("/app/commit", log_capture=[])
                assert r.status_code == http.HTTPStatus.OK, r.status_code

        def pad_node_id(nid):
            return (f"{{:{max_len}d}}").format(nid)

        LOG.info("Started CCF network with the following nodes:")
        for node in nodes:
            LOG.info("  Node [{}] = https://{}:{}".format(
                pad_node_id(node.local_node_id), node.pubhost, node.pubport))

        LOG.info(
            f"You can now issue business transactions to the {args.package} application"
        )
        if args.js_app_bundle is not None:
            LOG.info(f"Loaded JS application: {args.js_app_bundle}")
        LOG.info(
            f"Keys and certificates have been copied to the common folder: {network.common_dir}"
        )
        LOG.info(
            "See https://microsoft.github.io/CCF/main/use_apps/issue_commands.html for more information"
        )
        LOG.warning("Press Ctrl+C to shutdown the network")

        try:
            while True:
                time.sleep(60)

        except KeyboardInterrupt:
            LOG.info("Stopping all CCF nodes...")

    LOG.info("All CCF nodes stopped.")
Example #3
def Main():
    default_config = os.path.join(CCF_Etc, 'cchost.toml')
    default_output = os.path.join(CCF_Keys, 'ledger_authority.pem')

    parser = argparse.ArgumentParser(
        description='Fetch the ledger authority key from a CCF server')

    parser.add_argument(
        '--logfile',
        help='Name of the log file, __screen__ for standard output',
        default='__screen__',
        type=str)
    parser.add_argument('--loglevel',
                        help='Logging level',
                        default='WARNING',
                        type=str)

    parser.add_argument('--ccf-config',
                        help='Name of the CCF configuration file',
                        default=default_config,
                        type=str)
    parser.add_argument('--user-name',
                        help="Name of the user being added",
                        default="userccf",
                        type=str)

    options = parser.parse_args()

    # -----------------------------------------------------------------
    LOG.remove()
    if options.logfile == '__screen__':
        LOG.add(sys.stderr, level=options.loglevel)
    else:
        LOG.add(options.logfile)

    # -----------------------------------------------------------------
    try:
        config = toml.load(options.ccf_config)
    except Exception:
        LOG.error('unable to load ccf configuration file {0}'.format(
            options.ccf_config))
        sys.exit(-1)

    network_cert = config["start"]["network-cert-file"]
    (host, port) = config["rpc-address"].split(':')

    user_cert_file = os.path.join(CCF_Keys,
                                  "{}_cert.pem".format(options.user_name))
    user_key_file = os.path.join(CCF_Keys,
                                 "{}_privk.pem".format(options.user_name))

    try:
        user_client = CCFClient(host=host,
                                port=port,
                                cert=user_cert_file,
                                key=user_key_file,
                                ca=network_cert,
                                format='json',
                                prefix='app',
                                description="none",
                                version="2.0",
                                connection_timeout=3,
                                request_timeout=3)
    except Exception:
        LOG.error('failed to connect to CCF service')
        sys.exit(-1)

    #Temporary fix to skip checking CCF host certificate. Version 0.11.7 CCF certificate expiration was hardcoded to end of 2021
    user_client.client_impl.session.mount("https://", HTTPAdapter())
    user_client.client_impl.session.verify = False

    generate_ledger_authority(user_client, options, config)

    LOG.info('successfully generated ledger authority')
    sys.exit(0)
Example #4
 def thread_2():
     barrier.wait()
     time.sleep(0.5)
     logger.remove(a)
     logger.info("ccc{}ddd", next(counter))
Example #5
# Terminal log output format
stdout_fmt = '<cyan>{time:HH:mm:ss,SSS}</cyan> ' \
          '[<level>{level: <5}</level>] ' \
          '<blue>{module}</blue>:<cyan>{line}</cyan> - ' \
          '<level>{message}</level>'
# Log file record format
logfile_fmt = '<light-green>{time:YYYY-MM-DD HH:mm:ss,SSS}</light-green> ' \
          '[<level>{level: <5}</level>] ' \
          '<cyan>{process.name}({process.id})</cyan>:' \
          '<cyan>{thread.name: <10}({thread.id: <5})</cyan> | ' \
          '<blue>{module}</blue>.<blue>{function}</blue>:' \
          '<blue>{line}</blue> - <level>{message}</level>'

log_path = result_save_path.joinpath('oneforall.log')

logger.remove()
logger.level(name='TRACE', no=5, color='<cyan><bold>', icon='✏️')
logger.level(name='DEBUG', no=10, color='<blue><bold>', icon='🐞 ')
logger.level(name='INFOR', no=20, color='<green><bold>', icon='ℹ️')
logger.level(name='ALERT', no=30, color='<yellow><bold>', icon='⚠️')
logger.level(name='ERROR', no=40, color='<red><bold>', icon='❌️')
logger.level(name='FATAL', no=50, color='<RED><bold>', icon='☠️')

if not os.environ.get('PYTHONIOENCODING'):  # set the encoding
    os.environ['PYTHONIOENCODING'] = 'utf-8'

logger.add(sys.stderr, level='INFOR', format=stdout_fmt, enqueue=True)
logger.add(log_path,
           level='DEBUG',
           format=logfile_fmt,
           enqueue=True,
Example #6
def common_setup(_C: Config,
                 _A: argparse.Namespace,
                 job_type: str = "pretrain"):
    r"""
    Setup common stuff at the start of every pretraining or downstream
    evaluation job, all listed here to avoid code duplication. Basic steps:

    1. Fix random seeds and other PyTorch flags.
    2. Set up a serialization directory and loggers.
    3. Log important stuff such as config, process info (useful during
        distributed training).
    4. Save a copy of config to serialization directory.

    .. note::

        It is assumed that multiple processes for distributed training have
        already been launched from outside. Functions from
        :mod:`virtex.utils.distributed` module are used to get process info.

    Parameters
    ----------
    _C: virtex.config.Config
        Config object with all the parameters.
    _A: argparse.Namespace
        Command line arguments.
    job_type: str, optional (default = "pretrain")
        Type of job for which setup is to be done; one of ``{"pretrain",
        "downstream"}``.
    """

    # Get process rank and world size (assuming distributed is initialized).
    RANK = dist.get_rank()
    WORLD_SIZE = dist.get_world_size()

    # For reproducibility - refer https://pytorch.org/docs/stable/notes/randomness.html
    torch.manual_seed(_C.RANDOM_SEED)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    random.seed(_C.RANDOM_SEED)
    np.random.seed(_C.RANDOM_SEED)

    # Create serialization directory and save config in it.
    os.makedirs(_A.serialization_dir, exist_ok=True)
    _C.dump(os.path.join(_A.serialization_dir, f"{job_type}_config.yaml"))

    # Remove default logger, create a logger for each process which writes to a
    # separate log-file. This makes changes in global scope.
    logger.remove(0)
    if dist.get_world_size() > 1:
        logger.add(
            os.path.join(_A.serialization_dir, f"log-rank{RANK}.txt"),
            format="{time} {level} {message}",
        )

    # Add a logger for stdout only for the master process.
    if dist.is_master_process():
        logger.add(sys.stdout,
                   format="<g>{time}</g>: <lvl>{message}</lvl>",
                   colorize=True)

    # Print process info, config and args.
    logger.info(f"Rank of current process: {RANK}. World size: {WORLD_SIZE}")
    logger.info(str(_C))

    logger.info("Command line args:")
    for arg in vars(_A):
        logger.info("{:<20}: {}".format(arg, getattr(_A, arg)))
Example #7
 def remove():
     barrier.wait()
     time.sleep(0.5)
     logger.remove(j)
Example #8
def _setup_logger():
    logger.remove(0)
    logger.add(sys.stderr, level=config.Logging.level)
Example #9
#!/usr/bin/env python -W ignore::FutureWarning -W ignore::UserWarning -W ignore::DeprecationWarning
"""PINT-based tool for making simulated TOAs."""

import astropy.units as u
import numpy as np
from astropy.time import TimeDelta
import sys

import pint.logging
from loguru import logger as log

log.remove()
log.add(
    sys.stderr,
    level="WARNING",
    colorize=True,
    format=pint.logging.format,
    filter=pint.logging.LogFilter(),
)

import pint.fitter
import pint.models
import pint.toa as toa
import pint.simulation
import pint.residuals

__all__ = ["main"]


def main(argv=None):
    import argparse
Example #10
def main():
    #print(sys.argv[1])

    start_time = time()
    file_counter = 0
    nucleotides = ["A", "T", "G", "C"]

    if sys.argv[1].strip() == "count_first_in_duplex" or \
        sys.argv[1].strip() == "count_second_in_duplex":
        nuc_pos_in_duplex_to_count = sys.argv[1].strip()
        #print(nuc_pos_in_duplex_to_count)
    else:
        sys.exit("expected 'count_first_in_duplex' or 'count_second_in_duplex' as the first argument")
    #############################
    # logging
    if not os.path.exists("./log"):
        os.mkdir("./log")
    logger.remove() # don't put messages into notebook output
    logger.add("./log/apobec_count_snp_duplex_log_{time}" + "_" + nuc_pos_in_duplex_to_count + ".txt", backtrace=False)
    logger.add(sys.stderr, level="CRITICAL")
    #############################################
    
    if os.path.exists("./input_data"): #TODO make if not and then return
    # to get rid of else block below
        input_files = get_input_files_names("./input_data")

        num_files = len(input_files)
        print("""
               ---------------
               job started at {0} ...
               ---------------
               """.format(get_current_time()))

        progress_bar = IntProgress(min=0, max=num_files, bar_style='success')
        display(progress_bar)
        
        for f in input_files:
            try:
                df_raw_snp_container = []
                df_duplex_container = []
                ref_seq = get_ref(f)
                ####################################################
                # the nuc used in this loop is important:
                # 1. it is the nucleotide we use to find all duplexes in the
                #    reference that start with it
                # 2. count_snp then checks which nucleotides it changes into at the
                #    same position in the read and in the same context as the reference
                #    (i.e. the second nuc of the duplex is the same as in the reference,
                #    while the first nuc, the `nuc` var in the for loop below, changes)
                for nuc in nucleotides:
                    snp_type = nucleotides[:]
                    # remove nuc from snp type
                    snp_type.remove(nuc)
                     
                    if nuc_pos_in_duplex_to_count == "count_first_in_duplex":
                        duplex_posits = get_duplex_posits_with_ref_nuc_at_first_pos(ref_seq, nuc)
                        df = create_df(duplex_posits, snp_type)        
                        df_snp, coverage, record_id = count_snp_with_ref_nuc_at_first_pos(f, df, nuc)
                        #print(df_snp)
                    elif nuc_pos_in_duplex_to_count == "count_second_in_duplex":
                        duplex_posits = get_duplex_posits_with_ref_nuc_at_second_pos(ref_seq, nuc)
                        df = create_df(duplex_posits, snp_type)        
                        df_snp, coverage, record_id = count_snp_with_ref_nuc_at_second_pos(f, df, nuc)
                        #print(df_snp)
                    
                    df_duplex_in_context = create_pivot_df(df_snp)
                    #print(df_duplex_in_context)
                    #print("index [0][0]", df_duplex_in_context.index[0][0])
                    #print("index [0][1]", df_duplex_in_context.index[0][1])
                    df_raw_snp_container.append(df_snp)
                    df_duplex_container.append(df_duplex_in_context)
                    
                df_perc_container = convert_pivot_df_into_percent(df_duplex_container)
                df_cov = pd.DataFrame.from_dict(
                    {"coverage": coverage}, orient='index')
                
                # two functions chosen by a condition, so as not to have the condition inside a single function
                if nuc_pos_in_duplex_to_count == "count_first_in_duplex":
                    save_df_first_in_duplex(f,
                                            df_raw_snp_container,
                                            df_duplex_container,
                                            df_perc_container,
                                            df_cov,
                                            nuc_pos_in_duplex_to_count)
                elif nuc_pos_in_duplex_to_count == "count_second_in_duplex":
                    save_df_second_in_duplex(f,
                                            df_raw_snp_container,
                                            df_duplex_container,
                                            df_perc_container,
                                            df_cov,
                                            nuc_pos_in_duplex_to_count)
            
                progress_bar.value += 1
                file_counter += 1
            
            except Exception:
                logger.info(
                    """\n\t==================================================
                    file: {0} 
                    sequence id: {1}
                    ====================================================""".format(f, record_id))
                logger.exception("")
                print("exception detected. see log for more details")
                progress_bar.value += 1                

        finish_time = time()
        total_time = finish_time - start_time
        show_report(total_time, file_counter)

    else:
        os.mkdir("./input_data")
        print(
            """
        Houston, we have a problem...
        --------
        folder 'input_data' doesn't exist in the current directory 
        or maybe you've created it but misspelled its name.
        Anyway, it has just been created by this script.
        Paste your 'fasta' files into the folder 'input_data' 
        and run this script again.
        --------
        """
        )
Example #11
from pathlib import Path
from typing import *

import pandas
from loguru import logger
from statsmodels.sandbox.stats.multicomp import TukeyHSDResults  # Used to add a typing annotation to tukeyhsd()

import analysis
import projectoutput
import utilities
from analysis import growthcurver
from projectpaths import Filenames

TRACE = True
if TRACE:
    logger.remove()  # Need to remove the default sink so that the logger doesn't print messages twice.
    import sys

    logger.add(sys.stderr, level="TRACE")

pandas.set_option('mode.chained_assignment', None)
EXPECTED_FORMAT = "[strain].[condition].[plate].[replicate]"


def extract_labels_from_metadata(metadata: pandas.DataFrame, column: str, allowed_strains: List[str] = None, allowed_conditions: List[str] = None) -> \
  List[str]:
    """
		Extracts sample ids based on a specific column in the sample metadata table.
	Parameters
	----------
	metadata: pandas.DataFrame
Example #12
def caplog(caplog):
    handler_id = logger.add(caplog.handler, format="{message}")
    yield caplog
    logger.remove(handler_id)
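A minimal usage sketch (not part of the original snippet; hypothetical test name), assuming the caplog fixture above is registered in a conftest.py:

from loguru import logger

def test_warning_is_captured(caplog):
    logger.warning("disk almost full")
    assert "disk almost full" in caplog.text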
Example #13
def base_logger():
    """Initialize logging instance."""
    _loguru_logger.remove()
    _loguru_logger.add(sys.stdout, format=_LOG_FMT, level="INFO", enqueue=True)
    _loguru_logger.configure(levels=_LOG_LEVELS)
    return _loguru_logger
Example #14
def main_entry_point(argv=None):
    arguments = docopt(__doc__, version=__version__, argv=argv)
    logger.remove(0)  # Don't log directly to stderr
    if arguments["--debug"]:
        log_level = "DEBUG"
        log_fmt = (
            "[decofre] "
            "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> |"
            "<cyan>{name}</cyan>:<cyan>{function}</cyan>@<cyan>{line}</cyan>: "
            "<level>{message}</level>")
    else:
        log_level = "INFO"
        log_fmt = ("[decofre] "
                   "<green>{time:YYYY-MM-DD}T{time:HH:mm:ss}</green> {level}: "
                   "<level>{message}</level>")
    logger.add(utils.TqdmCompatibleStream(),
               level=log_level,
               format=log_fmt,
               colorize=True)
    # docopt does not yet support defaults for positional arguments, so set it here.
    # This might also be useful for complex default values.
    if arguments["<out-file>"] is None:
        arguments["<out-file>"] = "-"

    corpus_dir = pathlib.Path(arguments["<corpus-path>"]).resolve()
    antecedents_dir = (corpus_dir / "antecedents").resolve()

    with directory_manager(
            arguments["--intermediate-dir"]) as intermediate_dir:
        score_dir = (intermediate_dir / "score").resolve()
        gold_clusters_dir = (corpus_dir / "clusters").resolve()
        sys_clusters_dir = (intermediate_dir / "clusters").resolve()
        for p in (score_dir, sys_clusters_dir):
            p.mkdir(parents=True, exist_ok=True)

        antecedents_files = sorted(antecedents_dir.glob("*.json"))
        pbar = tqdm.tqdm(
            antecedents_files,
            unit="documents",
            desc="Processing",
            unit_scale=True,
            unit_divisor=1024,
            dynamic_ncols=True,
            leave=False,
            disable=None,
        )
        for data_file in pbar:
            stem = data_file.stem
            pbar.set_description(f"Processing {stem}")
            score_file = score_dir / f"{stem}.json"
            if not score_file.exists() or arguments["--overwrite"]:
                score.main_entry_point(
                    [arguments["<model>"],
                     str(data_file),
                     str(score_file)])
            else:
                logger.debug(f"Skipping scoring {score_file}")

            sys_clusters_file = sys_clusters_dir / f"{stem}.json"
            if not sys_clusters_file.exists() or arguments["--overwrite"]:
                clusterize.main_entry_point(
                    [str(score_file), str(sys_clusters_file)])
            else:
                logger.debug(f"Skipping clustering {sys_clusters_file}")

        with smart_open(arguments["<out-file>"], "w") as out_stream:
            out_stream.writelines(
                scorch.main.process_dirs(gold_clusters_dir, sys_clusters_dir))
Example #15
"""
@author: 闲欢
"""
from loguru import logger

logger.debug('this is a debug message')

logger.add('hello.log')

logger.debug('i am in log file')

id = logger.add('world.log',
                format="{time} | {level} | {message}",
                level="INFO")
logger.info('this is a debug message')
logger.remove(id)
logger.info('this is another debug message')
logger.add('runtime.log')
logger.info('this is a debug message')

# start a new file once it exceeds 200 MB
logger.add("size.log", rotation="200 MB")
# start a new file every day at 12:00 noon
logger.add("time.log", rotation="12:00")
# start a new file every week
logger.add("size.log", rotation="1 week")


@logger.catch
def a_function(x):
    return 1 / x
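A brief aside (not part of the original snippet): logger.add() also accepts retention and compression parameters, which pair naturally with the rotation options shown above.

# sketch: rotate weekly, keep rotated files for 10 days, and zip them
logger.add("app.log", rotation="1 week", retention="10 days", compression="zip")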
Example #16
def main(argv=None):
    import argparse

    parser = argparse.ArgumentParser(
        description="PINT tool for simulating TOAs",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("parfile", help="par file to read model from")
    parser.add_argument("timfile", help="Output TOA file name")
    parser.add_argument(
        "--inputtim",
        help="Input tim file for fake TOA sampling",
        type=str,
        default=None,
    )
    parser.add_argument(
        "--startMJD",
        help="MJD of first fake TOA",
        type=float,
        default=56000.0,
    )
    parser.add_argument("--ntoa",
                        help="Number of fake TOAs to generate",
                        type=int,
                        default=100)
    parser.add_argument("--duration",
                        help="Span of TOAs to generate (days)",
                        type=float,
                        default=400.0)
    parser.add_argument("--obs", help="Observatory code", default="GBT")
    parser.add_argument(
        "--freq",
        help="Frequency for TOAs (MHz)",
        nargs="+",
        type=float,
        default=1400.0,
    )
    parser.add_argument(
        "--error",
        help="Random error to apply to each TOA (us)",
        type=float,
        default=1.0,
    )
    parser.add_argument(
        "--addnoise",
        action="store_true",
        default=False,
        help="Actually add in random noise, or just populate the column",
    )
    parser.add_argument(
        "--fuzzdays",
        help="Standard deviation of 'fuzz' distribution (jd)",
        type=float,
        default=0.0,
    )
    parser.add_argument("--plot",
                        help="Plot residuals",
                        action="store_true",
                        default=False)
    parser.add_argument("--format",
                        help="The format of out put .tim file.",
                        default="TEMPO2")
    parser.add_argument(
        "--log-level",
        type=str,
        choices=("TRACE", "DEBUG", "INFO", "WARNING", "ERROR"),
        default=pint.logging.script_level,
        help="Logging level",
        dest="loglevel",
    )
    args = parser.parse_args(argv)
    log.remove()
    log.add(
        sys.stderr,
        level=args.loglevel,
        colorize=True,
        format=pint.logging.format,
        filter=pint.logging.LogFilter(),
    )

    log.info("Reading model from {0}".format(args.parfile))
    m = pint.models.get_model(args.parfile)

    out_format = args.format
    error = args.error * u.microsecond

    if args.inputtim is None:
        log.info("Generating uniformly spaced TOAs")
        ts = pint.simulation.make_fake_toas_uniform(
            startMJD=args.startMJD,
            endMJD=args.startMJD + args.duration,
            ntoas=args.ntoa,
            model=m,
            obs=args.obs,
            error=error,
            freq=np.atleast_1d(args.freq) * u.MHz,
            fuzz=args.fuzzdays * u.d,
            add_noise=args.addnoise,
        )
    else:
        log.info("Reading initial TOAs from {0}".format(args.inputtim))
        ts = pint.simulation.make_fake_toas_fromtim(
            args.inputtim,
            model=m,
            obs=args.obs,
            error=error,
            freq=np.atleast_1d(args.freq) * u.MHz,
            fuzz=args.fuzzdays * u.d,
            add_noise=args.addnoise,
        )

    # Write TOAs to a file
    ts.write_TOA_file(args.timfile, name="fake", format=out_format)

    if args.plot:
        # This should be a very boring plot with all residuals flat at 0.0!
        import matplotlib.pyplot as plt
        from astropy.visualization import quantity_support

        quantity_support()

        r = pint.residuals.Residuals(ts, m)
        plt.errorbar(
            ts.get_mjds(),
            r.calc_time_resids(calctype="taylor").to(u.us),
            yerr=ts.get_errors().to(u.us),
            fmt=".",
        )
        plt.xlabel("MJD")
        plt.ylabel("Residual (us)")
        plt.grid(True)
        plt.show()
Example #17
import pyutil.pretty as pretty
import routers

if __name__ == "__main__":
    # disable stdin, redirect stdout/stderr to uhrs.out
    stdout = open(file="uhrs.out", mode="a", encoding="utf-8")
    stdin = open(file="/dev/null")
    os.dup2(stdin.fileno(), sys.stdin.fileno())
    os.dup2(stdout.fileno(), sys.stderr.fileno())
    os.dup2(stdout.fileno(), sys.stdout.fileno())

    # become a daemon process
    daemon.daemonize(pid_file="uhrs.pid")

    # init loguru
    logger.remove(None)
    # logger.add(sink="uhrs_{time}.log", rotation="00:00", retention="10 days")
    logger.add(sink="uhrs.log")
    logger.info("starting uhrs service...")
    conf = configparser.ConfigParser()
    conf.read(filenames="uhrs.cfg", encoding="utf-8")
    log_num_list = conf.get("server", "log_num_list", fallback="")
    for num in log_num_list.split(","):
        pretty.set_log_number(int(num))

    # init db pool
    dbpool.init_db_pool(conf)

    # init redis
    redis_on = conf.has_section("redis")
    if redis_on:
Example #18
    """A loguru Helper method to format log strings"""
    assert isinstance(record, dict)
    keyname = "extra[classname]" if "classname" in record.get("extra") else "name"
    return (
            "<green>{time:YYYY-MM-DD_HH:mm:ss.SSS}</green> | <lvl>{level: <8}</lvl> | <c>%s:{function}:{line}</c> - <level>{message}</level>" % keyname
    )

def log_retention(files, max_log_size=20*1024**3):
    """Specify logfile retention policy, per file"""
    stats = [(_file, os.stat(_file)) for _file in files]
    stats.sort(key=lambda s: -s[1].st_mtime)  # Sort files from newest to oldest
    while sum(s[1].st_size for s in stats) > max_log_size:
        _file, _ = stats.pop()
        os.remove(_file)
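
# Sketch (not from the original file): loguru's `retention` parameter also accepts a
# callable that is passed the list of existing log files, so a file sink could reuse the
# helper above, e.g.:
#     logger.add("app.log", rotation="100 MB", retention=log_retention)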

logger.remove()  # Disable intrusive loguru defaults... ref
#     https://github.com/Delgan/loguru/issues/208
logger.add(
    sink=sys.stderr,
    colorize=True,
    diagnose=True,
    backtrace=True,
    enqueue=True,
    serialize=False,
    catch=True,
    level="DEBUG",
    # compression, encoding, and retention throw errors in logger.add()
    #compression="gzip",
    #encoding="utf-8",
    #retention="3 months",  # also see log_retention()
    # log_format_string(record)
Example #19
def test_handler_removed():
    i = logger.add(sys.__stdout__)
    logger.add(sys.__stderr__)
    logger.remove(i)
    assert repr(
        logger) == "<loguru.logger handlers=[(id=1, level=10, sink=<stderr>)]>"
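    # Handler ids are assigned in the order of the add() calls, so after removing the first
    # sink only the stderr sink remains; its id and default level (DEBUG == 10) are exactly
    # what the repr() string asserts.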
Example #20
    r = Repository(os.path.join("artifacts", f"{REPO_NAME}.db.tar.zst"))

    with open(os.path.join('artifacts', 'repoPackages.json'), 'w') as writer:
        vs = list(map(lambda v: v.dict(), r.entries.values()))
        writer.write(json.dumps(vs))
    repo.upload_file('repoPackages.json')


def render_main():
    repo = S3Repo(REPO_NAME, BUCKET_NAME)
    repo.download()
    upload_index(repo)


if __name__ == "__main__":
    logger.remove(0)
    logger.add(sys.stderr,
               format="<level>{level: <8}</level> | <cyan>{function}</cyan>:"
               "<cyan>{line}</cyan> - <level>{message}</level>",
               colorize=True)
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--build',
                        help='build latest packages',
                        action='store_true')
    parser.add_argument('--package',
                        help='update repo with latest packages',
                        action='store_true')
    parser.add_argument('--render',
                        help='render and upload an index.html for the repo',
Example #21
 def function():
     i = logger.add(NonSafeSink(0.1), format="{message}", catch=False)
     logger.debug("AAA")
     logger.info("BBB")
     logger.success("CCC")
     logger.remove(i)
Example #22
 def wrapper(*args, **kwargs):
     state = cur_ctx().find_object(self.LogState)
     # get the verbose/quiet levels from context
     if state.verbose:
         log_level = "DEBUG"
     elif state.quiet:
         log_level = "ERROR"
     else:
         log_level = self._stderr_log_level
     logger.remove()  # remove existing default logger
     logger.add(
         sys.stderr, level=log_level, format=self.stderr_format_func
     )
     if logfile and state.logfile:  # start a log file
         # If a subcommand was used, log to a file in the
         # logs/ subdirectory with the subcommand in the file name.
         if log_dir_parent is not None:
             self._log_dir_parent = log_dir_parent
         if self._log_dir_parent is None:
             log_dir_path = Path(".") / "logs"
         else:
             log_dir_path = Path(self._log_dir_parent)
         subcommand = cur_ctx().invoked_subcommand
         if subcommand is None:
             subcommand = state.subcommand
         if subcommand is not None:
             logfile_prefix = f"{self._name}-{subcommand}"
         else:
             logfile_prefix = f"{self._name}"
         if log_dir_path.exists():
             log_numbers = [
                 f.name[len(logfile_prefix) + 1 : -4]
                 for f in log_dir_path.glob(
                     logfile_prefix + "_*.log"
                 )
             ]
             log_number_ints = sorted(
                 [int(n) for n in log_numbers if n.isnumeric()]
             )
             if len(log_number_ints) > 0:
                 log_number = log_number_ints[-1] + 1
                 if (
                     self._retention is not None
                     and len(log_number_ints) > self._retention
                 ):
                     for remove in log_number_ints[
                         : len(log_number_ints) - self._retention
                     ]:
                         (
                             log_dir_path
                             / f"{logfile_prefix}_{remove}.log"
                         ).unlink()
             else:
                 log_number = 0
         else:
             log_number = 0
         if self._retention == 0:
             state.logfile_path = (
                 log_dir_path / f"{logfile_prefix}.log"
             )
         else:
             state.logfile_path = (
                 log_dir_path / f"{logfile_prefix}_{log_number}.log"
             )
         state.logfile_handler_id = logger.add(
             str(state.logfile_path), level=self._file_log_level
         )
     logger.debug(f'Command line: "{" ".join(sys.argv)}"')
     logger.debug(f"{self._name} version {self._version}")
     logger.debug(
         f"Run started at {str(self.start_times['Total']['wall'])[:SKIP_FIELDS]}"
     )
     return user_func(*args, **kwargs)
Example #23
def main():

    start_time = time()
    file_counter = 0

    #############################
    # logging
    if not os.path.exists("./log"):
        os.mkdir("./log")
    logger.remove()  # don't put messages into notebook output
    logger.add("./log/apobec_snp_rate_log_{time}.txt", backtrace=False)
    logger.add(sys.stderr, level="CRITICAL")
    ############################################

    if os.path.exists("./input_data"):
        input_files = get_input_files_names("./input_data")

        num_files = len(input_files)

        print("""
               ---------------
               job started at {0} ...
               ---------------
               """.format(get_current_time()))

        print("collecting snp...")
        progress_bar = IntProgress(min=0, max=num_files, bar_style='success')
        display(progress_bar)

        series_container = []
        for f in input_files:
            try:
                ref_seq = get_ref(f)
                snpes_per_read = count_snp_per_read(ref_seq, f)
                series_container.append(snpes_per_read)
                progress_bar.value += 1
                file_counter += 1
            except Exception:
                logger.info(
                    """\n\t==================================================
                    file: {0} 
                    ====================================================""".
                    format(f))
                logger.exception("")
                print("exception detected. see log for more details")
                progress_bar.value += 1

        print("snp collected...")

        try:
            df_raw_count = pd.DataFrame(series_container).T
            # drop the first row, which is the reference-to-itself comparison
            df_raw_count.drop([0], inplace=True)
            pivot = df_raw_count.describe()
            mode, median = get_mode_median(df_raw_count)
            pivot_df = pd.concat([pivot, mode, median])

            # renaming index values in pivot df
            pivot_df.rename(index={
                'count': 'reads total',
                'mean': 'mean snp per read',
                'min': 'min number of snp',
                'max': 'max number of snp',
                'mode': 'mode snp',
                'median': 'median snp'
            },
                            inplace=True)

            pivot_df = calculate_non_mutated(pivot_df, df_raw_count).T

            time_stamp = get_current_time()
            create_strip_plot(df_raw_count, time_stamp)
            print("plot consctructed...")

            df_raw_count = df_raw_count.T  # transpose after plotting
            save_df(pivot_df, df_raw_count, time_stamp)
            print("spreadsheet saved...")

            finish_time = time()
            total_time = finish_time - start_time
            show_report(total_time, file_counter)
        except Exception:
            logger.info(
                """\n\t==================================================""")
            logger.exception("")
            print("exception detected. see log for more details")

    else:
        os.mkdir("./input_data")
        print("""
        Houston, we have a problem...
        --------
        folder 'input_data' doesn't exist in the current directory 
        or maybe you've created it but misspelled its name.
        Anyway, it has just been created by this script.
        Paste your 'fasta' files into the folder 'input_data' 
        and run this script again.
        --------
        """)
Example #24
import os

os.environ.setdefault("LOGURU_INFO_COLOR", "<b>")
os.environ.setdefault("LOGURU_DEBUG_COLOR", "<b>")
os.environ.setdefault("LOGURU_ERROR_COLOR", "<b>")
os.environ.setdefault("LOGURU_WARNING_COLOR", "<b>")

from loguru import logger

logger.remove(handler_id=None)
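# handler_id=None removes every previously added handler, including the default stderr
# sink (same as calling logger.remove() with no argument).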
logger.add("russia_{time}.log",
           rotation="100MB",
           encoding="utf-8",
           colorize=False)
Example #25
    Time,
    ensure_datetime,
    get_masked_array,
    get_unmasked,
    replace_cube_coord,
    shorten_columns,
    shorten_features,
)

if "TQDMAUTO" in os.environ:
    from tqdm.auto import tqdm
else:
    from tqdm import tqdm

loguru_logger.enable("alepython")
loguru_logger.remove()
loguru_logger.add(sys.stderr, level="WARNING")

logger = logging.getLogger(__name__)
enable_logging("jupyter")

warnings.filterwarnings("ignore", ".*Collapsing a non-contiguous coordinate.*")
warnings.filterwarnings("ignore", ".*DEFAULT_SPHERICAL_EARTH_RADIUS.*")
warnings.filterwarnings("ignore", ".*guessing contiguous bounds.*")

warnings.filterwarnings(
    "ignore", 'Setting feature_perturbation = "tree_path_dependent".*'
)

normal_coast_linewidth = 0.3
mpl.rc("figure", figsize=(14, 6))
Example #26
def cli(log_level: str):
    logger.remove()
    logger.add(sys.stdout, level=log_level)
Example #27
import os
import argparse
import sys
import getpass
import json
import cgi
import datetime
from urllib.parse import unquote


D2L_BASEURL = "https://mycourses.rit.edu/"

# Not sure if this is unique to me, or just unique to RIT's tenant
OU = 6605

logger.remove()
logger.add(sys.stderr, level="INFO")

# basically, mkdir -p /blah/blah/blah
def mkdir_recursive(path):
    try:
        os.makedirs(path, exist_ok=True)
    except Exception as e:
        logger.error("Exception: {}".format(e))
        exit(1)


def get_xfrs_token(page_html):
    """
    Method to parse a D2L page to find the XSRF.Token. The token is returned as a string
    :param page_html:
Example #28
 def init_logger(cls):
     logger.remove()
     logger.add(sys.stdout, colorize=True, format="{message}")