Example #1
import os

from loguru import logger

from .settings import BASE_DIR

logger.remove(handler_id=None)
log_file = os.path.join(BASE_DIR, "log/file_{time}.log")
logger.add(log_file, backtrace=True, retention="5 min", encoding="utf8")
Example #2
import sys

from loguru import logger

from lib.config import DEBUG_LOG
from lib.resize import max_resize
from widgets.main_window import main_window

if __name__ == "__main__":
    args = sys.argv

    # start app
    appctxt = ApplicationContext()
    app = appctxt.app

    # prepare the logger
    logger.add(
        DEBUG_LOG,
        rotation="1 MB",
        retention="1 week",
        backtrace=True,
        diagnose=True,
        enqueue=True,
    )
    logger.info("-----------------------")
    logger.info("Launching application")

    try:
        # create instance of main window
        main_window = main_window(app, appctxt)
        # build the main window
        main_window.build()
        main_window.set_theme()

        # load data
        main_window.main_widget.find_sim()
Example #3
def main(arguments: argparse.Namespace) -> ExitCode:
    logger.remove()
    logger.add(sys.stderr, level=arguments.logging_level)
    logger.debug("Called with {!r}", arguments)

    if not (arguments.postgres_connection_string
            and arguments.synapse_auth_token):
        logger.error(
            "Postgres connection string and Synapse auth token can not be empty"
        )
        return ExitCode.Failure

    try:
        db = postgres.Postgres(arguments.postgres_connection_string)
    except (psycopg2.OperationalError, psycopg2.InternalError):
        logger.exception(
            "Connecting to database using {!r} failed:",
            arguments.postgres_connection_string,
        )
        return ExitCode.Failure

    session = requests.session()
    session.headers["Authorization"] = "Bearer " + arguments.synapse_auth_token

    before_date = get_delta_date(int(arguments.delta))
    before_date_string = before_date.isoformat(sep=" ", timespec="seconds")
    logger.info("Purging events up to: {} UTC", before_date_string)

    # Purge room history for all rooms
    rooms = get_room_record_ids(db)
    room_count = len(rooms)
    for index, room_id in enumerate(rooms, start=1):
        logger.info("({}/{}) Processing room: {!r}...", index, room_count,
                    room_id)
        event_id = get_last_event_id(db, room_id, before_date)
        if event_id is None:
            logger.info(
                "No event ID before: {} UTC for room: {!r}, skipping",
                before_date_string,
                room_id,
            )
            continue

        logger.info(
            "Last event ID before: {} UTC for room {!r}: {!r}",
            before_date_string,
            room_id,
            event_id,
        )
        purge_id = purge_history(session, arguments.api_url, room_id, event_id)
        logger.info("Purging room: {!r} in progress: {!r}...", room_id,
                    purge_id)
        result = wait_for_purge(session, arguments.api_url, purge_id)
        logger.info("Purged room: {!r} with status: {!r}", room_id, result)

    logger.info("Purging local media older than: {} UTC...",
                before_date_string)
    local_media = get_local_media_record_ids(db, before_date)
    important_files = get_important_media_ids(db)

    # Purge local media manually
    old_media = set(local_media) - important_files
    old_media_count = len(old_media)
    logger.info("{} media to be cleaned...", len(old_media))
    for index, media_id in enumerate(old_media, start=1):
        logger.info("({}/{}) Processing media: {!r}...", index,
                    old_media_count, media_id)
        paths = get_local_media_paths(arguments.media_store, media_id)
        for path in paths:
            if not os.path.isfile(path):
                logger.debug("{!r} could not be found or is not a file", path)
                continue
            os.remove(path)

        delete_local_media_record(db, media_id)

    # Purge remote media
    logger.info("Purging cached remote media older than: {} UTC...",
                before_date_string)
    result = purge_remote_media(session, arguments.api_url, before_date)
    logger.info("Purged cached remote media: {!r}", result)
    return ExitCode.Success
Example #4
        for _ in range(self.CONCURREND_DOWNLOADS):
            token_queue.put(token)
        logger.debug("ETLWorker | read Token and prepared Queues")
        return token_queue, insert_queue

    def load(self, id: int, token_queue: mp.Queue):
        logger.debug("Get token")
        token = token_queue.get()
        logger.info(f"Load data with pseudo_token {self.token}")
        response = requests.get(f"https://pokeapi.co/api/v2/pokemon/{id}")
        logger.info(f"Received answer with code {response.status_code}")
        token_queue.put(token)
        logger.debug("Put token")
        self.data = response.json()

    def transform(self) -> Pokemon:
        logger.debug("Start Transformation")
        time.sleep(10)  # make this function a little more expensive
        id = self.data["id"]
        name = self.data["name"]
        base_stat = self.data["stats"][0]["base_stat"]
        return Pokemon(id=id, name=name, base_stat=base_stat)


if __name__ == '__main__':
    # logger.level("DEBUG") only looks up a level; set the sink's level instead
    logger.add("download.log", level="DEBUG")
    etl = ETLWorker()
    etl.main()
    logger.success("Done")
Example #5
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name:     test_log
   Description :
   Author :       xmz
   date:          2019/4/22
-------------------------------------------------
"""
import os
import sys

from loguru import logger

logger.remove()
logger.add(sys.stdout, colorize=True,
           format="<yellow>{level}</yellow> | <red>{file}</red> | <red>{module}</red> | " \
                  "<green>{time:YYYY-MM-DD at HH:mm:ss}</green> | <level>{message}</level>",
           level="INFO")
logger.add(
    os.path.join("../../logs", "log_{time}.txt"),
    enqueue=True,
    colorize=True,
    format=
    "{level} | {file} | {module} | {time:YYYY-MM-DD at HH:mm:ss} | {message}",
    level="INFO")

logger.info("sth")
logger.info("sb")
logger.info("If you're using Python {:.2f}, prefer  of course!", 3.6333)
Example #6
from logging import exception
import requests
import pandas as pd
from loguru import logger

logger.add("ygezuzhu.log",encoding="utf8")


class ToExcle():
    """生成exlce方法"""

    @classmethod
    def dict_to_excle(cls, file_path=None, rows=None):
        """
        data/sql-->dict-->excle
        数据类型为字典转excle
        """

        # 1. load the data
        df1 = pd.DataFrame(rows)
        # 2. write to an Excel file
        writer = pd.ExcelWriter(file_path)
        df1.to_excel(writer, encoding='utf8', index=False)
        # save the workbook
        writer.save()

    @classmethod
    def tuple_to_excle(cls, file_path=None, rows=None, columns=None):
        """
        data/sql-->tuple-->excle
        数据类型为元组/列表转excle
Example #7
from nltk.tokenize import sent_tokenize
from pickle import load
from langdetect import detect
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.corpus import stopwords
from sklearn.svm import LinearSVC, SVC
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from nltk import ngrams

# used below but missing from this truncated snippet's imports
import os
import sys

from loguru import logger

logger.remove()
logger.add(sys.stdout, level="INFO")


def start_cr_evaluator():
    path_to_store_result = os.path.join(os.getcwd(), "cr_evaluator_results")
    os.makedirs(path_to_store_result, exist_ok=True)

    logger.info(
        "Start Analysis to find all privacy policy page without explicit acceptance"
    )
    # All dir inside log directory
    path_log = os.path.join(os.getcwd(), "logs")
    dir_inside_log = [x[0] for x in os.walk(path_log)]
    dir_inside_log.pop(0)
    logger.info(
        "Load ML Model for each practices and performed_not_performed and third_party_first_party"
Example #8
def main():
    """ Performance test with locust: parse command line options and run commands.
    """
    print(f"HttpRunner version: {__version__}")
    sys.argv[0] = 'locust'
    if len(sys.argv) == 1:
        sys.argv.extend(["-h"])

    if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
        start_locust_main()

    def get_arg_index(*target_args):
        for arg in target_args:
            if arg not in sys.argv:
                continue

            return sys.argv.index(arg) + 1

        return None

    # set logging level
    loglevel_index = get_arg_index("-L", "--loglevel")
    if loglevel_index and loglevel_index < len(sys.argv):
        loglevel = sys.argv[loglevel_index]
        loglevel = loglevel.upper()
    else:
        # default
        loglevel = "WARNING"

    logger.remove()
    logger.add(sys.stdout, level=loglevel)

    # get testcase file path
    try:
        testcase_index = get_arg_index("-f", "--locustfile")
        assert testcase_index and testcase_index < len(sys.argv)
    except AssertionError:
        print("Testcase file is not specified, exit.")
        sys.exit(1)

    testcase_file_path = sys.argv[testcase_index]
    sys.argv[testcase_index] = parse_locustfile(testcase_file_path)

    if "--processes" in sys.argv:
        """ locusts -f locustfile.py --processes 4
        """
        if "--no-web" in sys.argv:
            logger.error("conflict parameter args: --processes & --no-web. \nexit.")
            sys.exit(1)

        processes_index = sys.argv.index('--processes')
        processes_count_index = processes_index + 1
        if processes_count_index >= len(sys.argv):
            """ do not specify processes count explicitly
                locusts -f locustfile.py --processes
            """
            processes_count = multiprocessing.cpu_count()
            logger.warning(f"processes count not specified, use {processes_count} by default.")
        else:
            try:
                """ locusts -f locustfile.py --processes 4 """
                processes_count = int(sys.argv[processes_count_index])
                sys.argv.pop(processes_count_index)
            except ValueError:
                """ locusts -f locustfile.py --processes -P 8888 """
                processes_count = multiprocessing.cpu_count()
                logger.warning(f"processes count not specified, use {processes_count} by default.")

        sys.argv.pop(processes_index)
        run_locusts_with_processes(sys.argv, processes_count)
    else:
        start_locust_main()
Example #9
"""
============================
Author:古一
Time:2020/10/28
E-mail:[email protected]
============================
"""
import os

import pytest
from loguru import logger

logger.add('./log/{time}.log',
           rotation='20 MB',
           retention='1 week',
           encoding='utf-8')
pytest.main(['-s', r"--alluredir=report/json", "--clean-alluredir"])
os.system('allure generate ./report/json -o ./report/html -c')
# pytest.main(['-s'])
Example #10
def main():
    det_data_dir = Path(os.getenv('det_data'))
    task_data_dir = det_data_dir / "Task011_Kits"
    source_data_dir = task_data_dir / "raw"

    if not source_data_dir.is_dir():
        raise RuntimeError(
            f"{source_data_dir} should contain the raw data but does not exist."
        )

    splitted_dir = task_data_dir / "raw_splitted"
    target_data_dir = task_data_dir / "raw_splitted" / "imagesTr"
    target_data_dir.mkdir(exist_ok=True, parents=True)
    target_label_dir = task_data_dir / "raw_splitted" / "labelsTr"
    target_label_dir.mkdir(exist_ok=True, parents=True)

    logger.remove()
    logger.add(sys.stdout, level="INFO")
    logger.add(task_data_dir / "prepare.log", level="DEBUG")

    # save meta info
    dataset_info = {
        "name": "Kits",
        "task": "Task011_Kits",
        "target_class": None,
        "test_labels": True,
        "seg2det_stuff": [
            1,
        ],  # define stuff classes: kidney
        "seg2det_things": [
            2,
        ],  # define things classes: tumor
        "min_size": 3.,
        "labels": {
            "0": "lesion"
        },
        "labels_stuff": {
            "1": "kidney"
        },
        "modalities": {
            "0": "CT"
        },
        "dim": 3,
    }
    save_json(dataset_info, task_data_dir / "dataset.json")

    # prepare cases
    cases = [str(c.name) for c in source_data_dir.iterdir() if c.is_dir()]
    for c in maybe_verbose_iterable(cases):
        logger.info(f"Copy case {c}")
        case_id = int(c.split("_")[-1])
        if case_id < 210:
            shutil.copy(source_data_dir / c / "imaging.nii.gz",
                        target_data_dir / f"{c}_0000.nii.gz")
            shutil.copy(source_data_dir / c / "segmentation.nii.gz",
                        target_label_dir / f"{c}.nii.gz")

    # create an artificial test split
    create_test_split(
        splitted_dir=splitted_dir,
        num_modalities=1,
        test_size=0.3,
        random_state=0,
        shuffle=True,
    )
Example #11
import json
import requests
from loguru import logger
import time, os

logger.add(
    os.path.join(os.path.dirname(os.path.dirname(__file__)), "logs",
                 "weibo.log"),
    level='DEBUG',
    format='{time:YYYY-MM-DD HH:mm:ss} - {level} - {file} - {line} - {message}',
    rotation="10 MB")


class weiboMonitor(object):
    def __init__(self, uuids):
        self.uuids = uuids
        self.session = requests.session()
        self.reqHeaders = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Referer': 'https://passport.weibo.cn/signin/login',
            'Connection': 'close',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3'
        }

    @logger.catch
    def monitor_singer(self, uuid):
        user_info = 'https://m.weibo.cn/api/container/getIndex?uid=%s&type=uid&value=%s' % (
            uuid, uuid)
        con_id = ''
Example #12
def test_disabled_logger_in_sink(sink_with_logger):
    sink = sink_with_logger(logger)
    logger.disable("tests.conftest")
    logger.add(sink, format="{message}")
    logger.info("Disabled test")
    assert sink.out == "Disabled test\n"
Example #13
def test_file_sink_utf8_encoding(tmpdir):
    file = tmpdir.join("test.log")
    logger.add(str(file), encoding="utf8", format="{message}", errors="strict", catch=False)
    logger.info("天")
    logger.remove()
    assert file.read_text("utf8") == "天\n"
Example #14
def contextualize_test():
    logger.add(get_log_file("context"), format="{extra[ip]} {extra[user]} {message}", serialize=True)
    logger_ctx = logger.bind(ip="127.0.0.1", user="******")
    logger_ctx.info("Contextualize logger.")
    logger_ctx.bind(user="******").info("inline binding other user.")
Example #15
def test_level_too_high(writer, level):
    logger.add(writer, level=level, format="{message}")
    logger.info("Test level")
    assert writer.read() == ""
Example #16
import utils
import requests
import json
import os.path
from pprint import pformat
from oauth2creds import get_credentials
from loguru import logger

cfg = utils.config()
logger.add("gphoto_upload.log", rotation="1 MB")


def upload_to_gphotos(filepath, filename=None):
    if filename is None:
        filename = os.path.basename(filepath)
    response = _upload_binary_media(filepath, filename)
    if not response.ok:
        success = False
        # response.elapsed is a timedelta; total_seconds() also covers requests longer than 1 s
        elapsed = response.elapsed.total_seconds()

    else:
        success, elapsed = _insert_new_photo(response.text)
    return success, elapsed

def _upload_binary_media(filepath, filename):
    creds = get_credentials()
    with open(filepath, "rb") as photo_fp:
        binary_file = photo_fp.read()
    url = r"https://photoslibrary.googleapis.com/v1/uploads"
    headers = {
        "Content-type": "application/octet-stream",
Example #17
def test_invalid_level(writer, level):
    with pytest.raises(TypeError):
        logger.add(writer, level=level)
Example #18
from loguru import logger
import sys

logger.remove(0)
logger.add(sys.stdout,
           colorize=True,
           format="{level}: <level>{message}</level>")
Example #19
def test_unknown_level(writer, level):
    with pytest.raises(ValueError):
        logger.add(writer, level=level)
Example #20
def main():
    parser = argparse.ArgumentParser(
        description='Scene Classification Training')
    parser.add_argument('--device',
                        default='cuda:0',
                        type=str,
                        required=False,
                        help='GPU ids')
    parser.add_argument('--epoch',
                        default=350,
                        type=int,
                        required=True,
                        help='training epochs')
    parser.add_argument('--alldata',
                        dest='alldata',
                        action='store_true',
                        help='use alldata to train')
    parser.add_argument('--lr', default=0.01, type=float, help='learning rate')
    parser.add_argument('--weight_decay',
                        '-w',
                        default=5e-4,
                        type=float,
                        help='weight_decay')
    parser.add_argument('--batch_size',
                        default=128,
                        type=int,
                        help='training batch size')
    parser.add_argument('--output_dir', default='./checkpoint', type=str)
    parser.add_argument('--warm_up_epochs', default=10, type=int)
    parser.add_argument('--log_file', type=str, default='./log/default.log')
    parser.add_argument('--params', type=str, default=None)
    args = parser.parse_args()

    log_file_name = args.log_file
    logger.add(log_file_name)
    logger.info('args:\n' + args.__repr__())

    batch_size = args.batch_size
    output_dir = args.output_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    device = args.device
    best_acc = 0  # best test accuracy

    # Data
    logger.info('==> Preparing data..')
    transform_train = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.5,
                               contrast=0.5,
                               saturation=0.5,
                               hue=0.5),
        transforms.RandomGrayscale(p=0.1),
        transforms.ToTensor(),
    ])

    transform_test = transforms.Compose([
        transforms.ToPILImage(),  # to match the custom Dataset implementation
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor()
    ])
    train_dir = os.path.join(DATA_PATH, 'train/train')
    if not args.alldata:
        whole_set = SceneDataset(
            annotations_csv='/home/linhw/myproject/data_scene/train_labels.csv',
            root_dir=train_dir,
            transform=transform_train)

        whole_set2 = SceneDataset(
            annotations_csv='/home/linhw/myproject/data_scene/train_labels.csv',
            root_dir=train_dir,
            transform=transform_test)
        whole_len = len(whole_set)
        train_len = int(whole_len * 0.9)  # split into train and val sets
        val_len = whole_len - train_len
        indices = random.sample(range(0, whole_len), train_len)
        indices2 = []
        for x in range(0, whole_len):
            if x not in indices:
                indices2.append(x)

        trainset = torch.utils.data.Subset(whole_set, indices)
        testset = torch.utils.data.Subset(whole_set2, indices2)
    else:
        trainset = SceneDataset(
            annotations_csv='/home/linhw/myproject/data_scene/train_labels.csv',
            root_dir=train_dir,
            transform=transform_train)
        testset = torch.utils.data.Subset(
            SceneDataset(annotations_csv=
                         '/home/linhw/myproject/data_scene/train_labels.csv',
                         root_dir=train_dir,
                         transform=transform_test), range(8000)
        )  # when whole data is True, randomly choose 8000 training samples. Test acc here is training acc.

    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=8)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=8)

    # Model
    logger.info('==> Building model..')
    model = EfficientNet.from_name('efficientnet-b0', num_classes=100)
    model = model.to(device)
    if args.params is not None:
        model.load_state_dict(torch.load(args.params, map_location=device))

    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=args.weight_decay)
    #optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    warm_up_with_cosine_lr = lambda epoch: epoch / args.warm_up_epochs if epoch <= args.warm_up_epochs else 0.5 * (
        math.cos((epoch - args.warm_up_epochs) /
                 (args.epoch - args.warm_up_epochs) * math.pi) + 1)
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer, lr_lambda=warm_up_with_cosine_lr)

    best_acc = 0
    for epoch in range(args.epoch):
        logger.info("Epoch {} started".format(epoch))

        train_acc, training_loss = train(model, optimizer, trainloader, device)
        logger.info(
            "train acc = {:.4f}, training loss = {:.4f} lr = {:.4f}".format(
                train_acc, training_loss, warm_up_with_cosine_lr(epoch)))

        test_acc, test_loss = test(model, testloader, device)
        logger.info("test acc = {:.4f}, test loss = {:.4f}".format(
            test_acc, test_loss))

        if test_acc > best_acc:
            best_acc = test_acc
            logger.info("best acc improved to {:.4f}".format(best_acc))
            model_to_save = model.module if hasattr(model, 'module') else model
            torch.save(model_to_save.state_dict(),
                       '{}/best_model.pt'.format(output_dir))
            logger.info("model saved to {}/best_model.pt".format(output_dir))

        model_to_save = model.module if hasattr(model, 'module') else model
        torch.save(model_to_save.state_dict(),
                   '{}/last_model.pt'.format(output_dir))
        logger.info("model saved to {}/last_model.pt".format(output_dir))

        model_to_save = model.module if hasattr(model, 'module') else model
        torch.save(model_to_save.state_dict(),
                   '{}/model.pt'.format(output_dir))
        logger.info("model saved to {}/model.pt".format(output_dir))
        scheduler.step()

        logger.info("Epoch {} ended, best acc = {:.4f}".format(
            epoch, best_acc))

    logger.info("Training finished, best_acc = {:.4f}".format(best_acc))
Example #21
def test_level_low_enough(writer, level):
    logger.add(writer, level=level, format="{message}")
    logger.info("Test level")
    assert writer.read() == "Test level\n"
Example #22
import sys
from loguru import logger

logger.remove()
logger.add(sys.stderr, format="", colorize=False, backtrace=True)


@logger.catch()
def a():
    1 / 0


def b():
    2 / 0


def c():
    3 / 0


a()

with logger.catch():
    b()

try:
    c()
except ZeroDivisionError:
    logger.exception("")
Example #23
        z_cs = pickle.load(f)
    exp_args = load_yaml_args(args.config)
    x_cs_discrete = get_discrete_configspace(x_cs, exp_args.x_grid_size)
    z_cs_discrete = get_discrete_configspace(z_cs,
                                             exp_args.z_grid_size,
                                             fidelity_space=True)
    config_spaces = dict(x=x_cs,
                         x_discrete=x_cs_discrete,
                         z=z_cs,
                         z_discrete=z_cs_discrete)

    # Logging details
    log_suffix = time.strftime("%x %X %Z")
    log_suffix = log_suffix.replace("/", '-').replace(":",
                                                      '-').replace(" ", '_')
    logger.add("{}/logs/collator_{}.log".format(path, log_suffix),
               **_logger_props)
    print("Logging at {}/logs/collator_{}.log".format(path, log_suffix))

    initial_file_list = os.listdir(path)
    # task_datas = dict()

    while True:
        task_datas = dict()
        # list available tasks
        task_ids = [
            int(tid) for tid in os.listdir(dump_path)
            if len(os.listdir(os.path.join(dump_path, tid))) > 0
        ]
        if len(task_ids) == 0:
            continue
        batch_size = args.max_batch_size // len(task_ids)
Example #24
from loguru import logger
# basic usage
logger.debug('this is a debug message')
# write the output to a runtime.log file
# logger.add('runtime.log')
logger.debug('this is a debug message')
# for x in range(1000):
#     logger.info('%s' % x)

# logger.add('runtime1.log', format="{time} {level} {message}", filter="my_module", level="INFO")
# logger.add('runtime1.log', format="{time} {level} {message}", filter="my_module", level="DEBUG")

from loguru import logger

trace = logger.add('runtime2.log')
logger.debug('this is a debug message11')
# logger.remove(trace)
# logger.debug('this is another debug message11')

# rotation configuration
# With this kind of configuration a new log file is started every 500 MB, so no single
# file grows too large. Adding a {time} placeholder to the file name makes the
# creation time be substituted automatically, producing a log file whose name
# contains a timestamp.
# logger.add('runtime_{time}.log', rotation="1024 MB")
# configured as 1 KB files
# logger.add('runtime_{time}.log', rotation="1024 B")
# logger.add('runtime_{time}.log', rotation="10 MB")
# logger.info()
# for x in range(10000000000000):
#     logger.info('%s' % x)
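
A minimal sketch (not part of the original snippet) combining rotation with the retention and compression options that logger.add also accepts:

from loguru import logger

# hypothetical illustration: rotate by size, expire old files, compress rotated ones
logger.add(
    'runtime_{time}.log',   # {time} is filled in when the file is created
    rotation='500 MB',      # start a new file once the current one reaches 500 MB
    retention='10 days',    # delete rotated files older than 10 days
    compression='zip',      # zip each file after it is rotated
)
logger.info('rotation demo')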
Example #25
from tqdm import tqdm
from vidaug import augmentors as va
import math
import copy
from sklearn.model_selection import train_test_split, KFold
import functools
from collections import Counter

import torch
import pandas as pd
import torch.utils.data as data
from PIL import Image

# used below but missing from this truncated snippet's imports
import sys
import yaml
from typing import Dict, List

from loguru import logger

DATASET_PATH = '../GTA_dataset'
JPG_PATH = '../GTA_JPG_DATASET'
logger.add(sys.stdout)



def load_dataset_config(config_path)->Dict[str, List[str]]:
    with open(config_path) as file:
        dataset_conf = yaml.load(file, Loader=yaml.FullLoader)
    class_map = dataset_conf['class_map']
    return class_map


def pil_loader(path):
    # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
    with open(path, 'rb') as f:
        with Image.open(f) as img:
            return img.convert('RGB')
Example #26
import backtrader as bt
from math import floor
import datetime
import logging
from loguru import logger

from initialize import APP_PATH

logging.basicConfig(level=logging.DEBUG,
                    filename='app.log',
                    filemode='w',
                    format='%(name)s - %(levelname)s - %(message)s')

logger.add(
    f"{APP_PATH}/logs/update_eri_dates_{datetime.date.today().strftime('%Y%m%d')}.log",
    rotation="5 MB",
    backtrace=True,
    diagnose=True)


class GenericStrategy(bt.Strategy):
    params = dict(window_s=21,
                  window_m=50,
                  window_l=100,
                  window_xs=5,
                  risk=0.05,
                  stop_dist=0.05,
                  dev_multiplier=2)

    def __init__(self):
        # turn on history
Example #27
def create_logger():
    """Create custom logger."""
    custom_logger.remove()
    custom_logger.add(stdout, colorize=True, format=formatter)
    return custom_logger
Example #28
File: model.py Project: vzhong/gazp
    def run_train(self, train, dev, args=None, verbose=True, eval_train=False):
        args = args or self.args
        if not os.path.isdir(self.dout):
            os.makedirs(self.dout)
        logger_loop = logger.bind(type='train_loop')

        logger_loop.info('Using config\n{}'.format(args))
        self.save_config()

        flog = self.get_file('train.log')
        fmetrics = open(self.get_file('train.metrics.jsonl'), 'wt')
        fbestmetrics = self.get_file('train.best.json')

        logger_loop.info('Logging to {}'.format(flog))
        logger.add(flog, rotation='5 MB', mode='wt', level='INFO', format="{time} {extra[type]} -- {message}")

        optimizer, scheduler = self.get_optimizer(train)
        iteration = 0
        start_epoch = 0
        if args.resume:
            metrics = self.load_save(fname=args.resume, optimizer=None if args.restart_optim else optimizer, scheduler=None if args.restart_optim or args.restart_scheduler else scheduler)
            start_epoch = metrics['epoch']
            iteration = metrics['iteration']
            logger_loop.info('Resuming from {}'.format(args.resume))
            logger_loop.info(pprint.pformat(metrics))
            self.args = args

        train_preds = None
        best = {}
        fbest = os.path.join(self.dout, 'best.tar')

        for epoch in tqdm.trange(start_epoch, args.epoch, desc='epoch'):
            logger_loop.info('Starting train epoch {}'.format(epoch))
            loss = defaultdict(lambda: 0)

            self.eval()
            train = train.reset()

            self.train()
            timing = defaultdict(list)
            for batch in train.batch(args.batch, shuffle=True, verbose=verbose, desc='train'):
                start_time = time.time()

                self.state.update({'iteration': iteration, 'epoch': epoch})
                feat = self.featurize(batch)
                out = self.forward(**feat)

                forward_time = time.time()

                loss_ = self.compute_loss(out, feat, batch)
                loss_backprop = 0
                if isinstance(loss_, dict):
                    for k, v in loss_.items():
                        loss['loss_' + k] += v.item() * len(batch)
                        loss_backprop += v
                else:
                    loss['loss'] += loss_.item() * len(batch)
                    loss_backprop += loss_

                iteration += len(batch)
                loss_backprop.backward()
                torch.nn.utils.clip_grad_norm_(self.parameters(), self.args.max_grad_norm)

                old_params = copy.deepcopy(list(self.named_parameters()))
                optimizer.step()

                # running avg
                new_params = dict(self.named_parameters())
                interp = self.args.running_avg
                for name, op in old_params:
                    new_params[name].data.copy_(interp*op.data + (1-interp)*new_params[name].data)
                self.load_state_dict(new_params)

                scheduler.step()
                optimizer.zero_grad()

                backward_time = time.time()

                if not eval_train:
                    train_preds = train.accumulate_preds(train_preds, self.extract_preds(out, feat, batch))

                timing['forward'].append(forward_time - start_time)
                timing['backward'].append(backward_time - forward_time)
                timing['batch'].append(backward_time - start_time)
            if eval_train:
                train_preds = self.run_pred(train, args, verbose=verbose, desc='train_pred')
            metrics = {'epoch': epoch, 'iteration': iteration}
            for k, v in loss.items():
                metrics[k] = v / len(train)
            for k, v in timing.items():
                metrics['time_{}'.format(k)] = sum(v) / len(v)
            metrics.update({'train_{}'.format(k): v for k, v in self.compute_metrics(train, train_preds).items()})

            dev_preds = self.run_pred(dev, args, verbose=verbose, desc='dev_pred')
            metrics.update({'dev_{}'.format(k): v for k, v in self.compute_metrics(dev, dev_preds).items()})

            fmetrics.write(json.dumps(metrics) + '\n')
            logger_loop.info('\n' + pprint.pformat(metrics))

            if self.better(metrics, best):
                best.update(metrics)
                logger_loop.info('Found new best! Saving checkpoint')
                self.save(metrics, optimizer, scheduler, fbest)
                self.write_preds(dev, dev_preds, self.get_file('dev.preds.json'))
                with open(fbestmetrics, 'wt') as f:
                    json.dump(metrics, f, indent=2)

        logger_loop.info('Loading best checkpoint from {}'.format(fbest))
        metrics = self.load_save(fname=fbest)
        logger_loop.info(pprint.pformat(metrics))
        fmetrics.close()
Example #29
import time
from loguru import logger

from codelab_adapter_client import AdapterNode
from codelab_adapter_client.utils import get_or_create_node_logger_dir, install_requirement
from codelab_adapter_client.thing import AdapterThing
from codelab_adapter_client.utils import threaded

import cozmo
from cozmo.util import degrees, distance_mm, speed_mmps

# logger.warning(dict(os.environ))
# log for debug
node_logger_dir = get_or_create_node_logger_dir()
debug_log = str(node_logger_dir / "debug.log")
logger.add(debug_log, rotation="1 MB", level="DEBUG")


class CozmoProxy(AdapterThing):
    def __init__(self, node_instance):
        super().__init__(thing_name="Cozmo", node_instance=node_instance)
        self.disconnect_flag = False

    def _say_hi(self, robot):
        robot.say_text("hi").wait_for_completed()

    def list(self, timeout=5) -> list:
        if self.thing:
            # check whether the connection is still working
            return ["Cozmo"]
        try:
Example #30
def structured_logging_test():
    logger.add(get_log_file("structured"), serialize=True)
    logger.debug(test_data)
Example #31
CYCLE_TESTER = env.int('CYCLE_TESTER', 20)
# definition of getter cycle, it will get proxy every CYCLE_GETTER second
CYCLE_GETTER = env.int('CYCLE_GETTER', 100)

# definition of tester
TEST_URL = env.str('TEST_URL', 'http://www.baidu.com')
TEST_TIMEOUT = env.int('TEST_TIMEOUT', 10)
TEST_BATCH = env.int('TEST_BATCH', 20)
# TEST_HEADERS = env.json('TEST_HEADERS', {
#     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
# })
TEST_VALID_STATUS = env.list('TEST_VALID_STATUS', [200, 206, 302])

# definition of api
API_HOST = env.str('API_HOST', '0.0.0.0')
API_PORT = env.int('API_PORT', 5555)
API_THREADED = env.bool('API_THREADED', True)

# flags of enable
ENABLE_TESTER = env.bool('ENABLE_TESTER', True)
ENABLE_GETTER = env.bool('ENABLE_GETTER', True)
ENABLE_SERVER = env.bool('ENABLE_SERVER', True)

logger.add(env.str('LOG_RUNTIME_FILE', 'runtime.log'),
           level='DEBUG',
           rotation='1 week',
           retention='20 days')
logger.add(env.str('LOG_ERROR_FILE', 'error.log'),
           level='ERROR',
           rotation='1 week')
Example #32
import argparse
import sys
import getpass
import json
import cgi
import datetime
import os
from urllib.parse import unquote

from loguru import logger


D2L_BASEURL = "https://mycourses.rit.edu/"

# Not sure if this is unique to me, or just unique to RIT's tenant
OU = 6605

logger.remove()
logger.add(sys.stderr, level="INFO")

# basically, mkdir -p /blah/blah/blah
def mkdir_recursive(path):
    try:
        os.makedirs(path, exist_ok=True)
    except Exception as e:
        logger.error("Exception: {}".format(e))
        exit(1)


def get_xfrs_token(page_html):
    """
    Method to parse a D2L page to find the XSRF.Token. The token is returned as a string
    :param page_html:
    :return:
Example #33
import os
import os.path
import shutil
import datetime
from loguru import logger
from pathlib import Path

import mongoengine as me
from me_models import Db_connect, Queue, State, Gphoto, SourceList

from utils import file_md5sum, config
from gphoto_upload import upload_to_gphotos
from drive_walk import GphotoSync

cfg = config()
logger.add("app.log", rotation="1 MB")
Db_connect()

# TODO: Change logging levels to eliminate most logging


def main():
    """Queue maintenance runs continuously."""
    QueueWorker()
    print("Main Done")


class QueueWorker:
    def __init__(self):