示例#1
0
def test_create_logger():
    """Each logger writes to its own file; two loggers may share one file."""
    path1 = os.path.join(TMP_DIR, "1.log")
    path2 = os.path.join(TMP_DIR, "2.log")

    log1 = log.create_logger("LogOne", lvl="DEBUG", path=path1)
    log2 = log.create_logger("LogTwo", lvl="INFO", path=path2)
    log3 = log.create_logger("LogThree", lvl="ERROR", path=path2)

    # Emit the standard battery of records through each logger.
    for each_logger in (log1, log2, log3):
        log_levels(each_logger)

    # DEBUG logger: every expected record lands in its private file.
    with open(path1) as file1:
        written = file1.readlines()
        assert len(written) == len(EXPECTED_LOGS)
        for written_line, expected in zip(written, EXPECTED_LOGS):
            assert expected.format("LogOne") in written_line

    # Shared file: INFO logger contributes 4 records, ERROR logger 2 more.
    with open(path2) as file2:
        written = file2.readlines()
        assert len(written) == 4 + 2
        for written_line, expected in zip(written[:4], EXPECTED_LOGS[-4:]):
            assert expected.format("LogTwo") in written_line
        for written_line, expected in zip(written[4:], EXPECTED_LOGS[-2:]):
            assert expected.format("LogThree") in written_line
示例#2
0
    def start_test(self, artifact_path, test_name, test_location, slaveid):
        """Begin log capture for a test on the given slave.

        Removes any stale per-test cfme.log, attaches a fresh logger to the
        new test entry, and fires a 'filedump' hook for each artifact.
        Refuses to start if the slave already has a test in progress.
        """
        if not slaveid:
            slaveid = "Master"
        test_ident = "{}/{}".format(test_location, test_name)
        if slaveid in self.store:
            if self.store[slaveid].in_progress:
                # print() form is valid on both Python 2 and Python 3
                # (the original bare print statement is a SyntaxError on 3).
                print("Test already running, can't start another, logger")
                return None
        self.store[slaveid] = self.Test(test_ident)
        self.store[slaveid].in_progress = True
        artifacts = []
        os_filename = self.ident + "-" + "cfme.log"
        os_filename = os.path.join(artifact_path, os_filename)
        # Start from a clean log file for this test.
        if os.path.isfile(os_filename):
            os.remove(os_filename)
        artifacts.append(os_filename)
        self.store[slaveid].logger = create_logger(self.ident + test_name, os_filename)
        self.store[slaveid].logger.setLevel(self.level)

        # Announce each artifact so downstream reporters know about the file.
        for log_name in artifacts:
            desc = log_name.rsplit("-", 1)[-1]
            self.fire_hook('filedump', test_location=test_location, test_name=test_name,
                description=desc, slaveid=slaveid, contents="", file_type="log",
                display_glyph="align-justify", dont_write=True, os_filename=log_name,
                group_id="pytest-logfile")
示例#3
0
def main2(args):
    """Index extracted Wikipedia sentences using a pool of worker threads.

    Collects the extracted-article file pathnames, feeds them through a
    thread-safe queue, and blocks until every file has been processed.
    """
    # print() form is valid on both Python 2 and Python 3
    # (the original bare print statement is a SyntaxError on 3).
    print("Running...")

    # get extracted wikipedia file pathnames
    subdirs = io.list_directories(config.WIKIPEDIA_EXTRACTED_DIR)
    if args.letters:
        # assumes the prefix letter sits at position -2 of the path — TODO confirm
        subdirs = [p for p in subdirs if p[-2] in config.WIKIPEDIA_SUB_DIR_PREFIXES]
    pathnames = []
    for sb in subdirs:
        pathnames.extend(io.list_files(sb))
    pathnames.sort()

    # create thread-safe queue
    queue = parallel.create_queue(pathnames)

    # create workers, each with its own logger and sentence tokenizer
    workers = []
    for i in range(args.threads):  # range instead of Python-2-only xrange
        logger = log.create_logger('LOGGER %d' % i, 'log_%d.log' % i)
        sent_tokenizer = parser.get_sentence_tokenizer()
        if args.verbose:
            logger.setLevel(logging.DEBUG)
        worker = SentenceIndexWorker(queue, sent_tokenizer, logger)
        workers.append(worker)

    # begin
    for worker in workers:
        worker.start()

    # block until all files have been processed
    queue.join()

    print("Done!")
示例#4
0
    def start_test(self, artifact_path, test_name, test_location, slaveid):
        """Begin log capture for a test, one in-progress test per slave.

        Deletes any stale per-test cfme.log, installs a fresh logger on the
        new test record, and fires a 'filedump' hook for each artifact.
        """
        if not slaveid:
            slaveid = "Master"
        test_ident = "{}/{}".format(test_location, test_name)
        # Refuse to start while the slave still has an unfinished test.
        if slaveid in self.store and self.store[slaveid].in_progress:
            print("Test already running, can't start another, logger")
            return None
        new_test = self.Test(test_ident)
        new_test.in_progress = True
        self.store[slaveid] = new_test

        log_path = os.path.join(artifact_path, self.ident + "-" + "cfme.log")
        # Start from a clean log file for this test run.
        if os.path.isfile(log_path):
            os.remove(log_path)
        artifacts = [log_path]

        new_test.logger = create_logger(self.ident + test_name, log_path)
        new_test.logger.setLevel(self.level)

        # Announce each artifact so downstream reporters know about the file.
        for log_name in artifacts:
            desc = log_name.rsplit("-", 1)[-1]
            self.fire_hook('filedump',
                           test_location=test_location,
                           test_name=test_name,
                           description=desc,
                           slaveid=slaveid,
                           contents="",
                           file_type="log",
                           display_glyph="align-justify",
                           dont_write=True,
                           os_filename=log_name,
                           group_id="pytest-logfile")
示例#5
0
    def start_test(self, artifact_path, test_name, test_location, slaveid):
        """Begin artifact collection for a test on the given slave.

        Removes any stale per-test cfme.log, attaches a fresh logger to the
        new test entry, and returns the artifact mapping for the test ident.
        Refuses to start if the slave already has a test in progress.
        """
        if not slaveid:
            slaveid = "Master"
        test_ident = "{}/{}".format(test_location, test_name)
        if slaveid in self.tests:
            if self.tests[slaveid].in_progress:
                # print() form is valid on both Python 2 and Python 3
                # (the original bare print statement is a SyntaxError on 3).
                print("Test already running, can't start another")
                return None
        # NOTE(review): sibling variants of this method pass test_ident here;
        # confirm self.Test really expects the slaveid.
        self.tests[slaveid] = self.Test(slaveid)
        self.tests[slaveid].in_progress = True
        artifacts = []
        os_filename = self.ident + "-" + "cfme.log"
        os_filename = os.path.join(artifact_path, os_filename)
        # Start from a clean log file for this test.
        if os.path.isfile(os_filename):
            os.remove(os_filename)
        artifacts.append(os_filename)
        self.tests[slaveid].logger = create_logger(self.ident + test_name,
                                                   os_filename)
        self.tests[slaveid].logger.setLevel(self.level)

        return None, {
            'artifacts': {
                test_ident: {
                    'files': {
                        self.ident: artifacts
                    }
                }
            }
        }
示例#6
0
    def __init__(self, game_cls, params=None):
        """Create an agent given a game class.

        Args:
            game_cls: game class providing ``states`` and ``actions``.
            params (dict, optional):
                discount_rate       default is 0.95
                learning_rate       default is 0.001
                exploration_rate    default is 1
                exploration_decay   default is 0.995
                exploration_min     default is 0.01

        """
        # Unique, monotonically increasing agent number.
        self.number = Agent.agents_n
        Agent.agents_n += 1

        self.states = game_cls.states
        self.actions = game_cls.actions
        self.action_size = len(self.actions)

        # Q-table initialized to zero: one row per state, one entry per action.
        self.qvalues = [
            [0 for _action in self.actions]
            for _state in self.states
        ]
        self.extras = {
            'cumul_reward': 0,
            'updates_n': 0,
        }
        self.history = History(10)

        # Start from the class-level defaults, then overlay caller overrides.
        # (``params=None`` replaces the original mutable default ``{}``,
        # which would have been shared across all calls.)
        self.params = dict(Agent.params)
        self.set_params(**(params or {}))

        self.log = create_logger(str(self), log_level=LOG_LEVEL)
        self.log.info("Agent initialized with params: %s.", self.params)
示例#7
0
 def __init__(self, cmd):
     """Store the command-line namespace and set up logging.

     Args:
         cmd: command line parameters.
     """
     self.cmd = cmd
     self.log = create_logger(
         __name__,
         silent=False,
         to_disk=True,
         log_file=cfg.log,
     )
     # Record the full command line for later debugging.
     self.log.info(str(cmd))
示例#8
0
def create_app(config_type):
    """Application factory: build and configure the Flask app."""
    # Create the Flask application from the given configuration type.
    app = create_flask_app(config_type)
    # Wire up shared extensions.
    register_extensions(app)

    # Logging: disable exception propagation so errors are handled
    # by the app's own error handlers, then install the app logger.
    app.config['PROPAGATE_EXCEPTIONS'] = False
    from utils.log import create_logger
    create_logger(app)

    # Resolve the current user's info before every request.
    from utils.middlewares import get_userinfo
    app.before_request(get_userinfo)

    # Register the blueprints.
    register_blueprint(app)

    return app
示例#9
0
    def __init__(self):
        """Set up an empty 3x3 board, two players, and fresh game state."""
        super().__init__()
        # 3x3 grid stored flat, row-major: (0,0), (0,1), ..., (2,2).
        self.board = [Cell(r, c) for r in range(3) for c in range(3)]
        self.history = defaultdict(list)
        self.winner = None
        self.ended = False
        self.players = [Player(0, "X"), Player(1, "O")]

        self.log = create_logger(self.__class__.__name__, log_level=LOG_LEVEL)
示例#10
0
 def handle(self):
     """
     Handle multiple requests - each expected to be a 4-byte length,
     followed by the LogRecord in pickle format. Logs the record
     according to whatever policy is configured locally.
     """
     global logger_cache
     while True:
         # Read the 4-byte big-endian length prefix; a short read means
         # the peer closed the connection.
         chunk = self.connection.recv(4)
         if len(chunk) < 4:
             break
         slen = struct.unpack(">L", chunk)[0]
         # Keep reading until the full pickled record has arrived.
         chunk = self.connection.recv(slen)
         while len(chunk) < slen:
             chunk = chunk + self.connection.recv(slen - len(chunk))
         # NOTE(review): pickle.loads on network data is only safe when the
         # sender is trusted — confirm clients are within the trust boundary.
         obj = pickle.loads(chunk)
         record = logging.makeLogRecord(obj)
         # if a name is specified, we use the named logger rather than the one
         # implied by the record.
         if self.server.logname is not None:
             name = self.server.logname
         else:
             name = record.name
         if not name:
             filename = logs_path.join("sprout.log")
         else:
             # Map the dotted logger name onto a directory tree, e.g.
             # "a.b.c" -> logs_path/a/b/c.log, creating directories on the way
             # (the final component carries the ".log" suffix, so no mkdir).
             fields = name.split(".")
             fields[-1] += ".log"
             filename = logs_path
             for field in fields:
                 filename = filename.join(field)
                 if not field.endswith(".log"):
                     with global_fs_lock:
                         if not filename.exists():
                             filename.mkdir()
         filename = filename.strpath
         # Look up (or lazily create) the per-file logger and its write lock.
         with logger_cache_lock:
             if filename in logger_cache:
                 logger, lock = logger_cache[filename]
             else:
                 logger = create_logger(name,
                                        filename=filename,
                                        max_file_size=MAX_FILE_SIZE,
                                        max_backups=MAX_BACKUPS)
                 lock = Lock()
                 logger_cache[filename] = (logger, lock)
         # N.B. EVERY record gets logged. This is because Logger.handle
         # is normally called AFTER logger-level filtering. If you want
         # to do filtering, do it at the client end to save wasting
         # cycles and network bandwidth!
         with lock:
             logger.handle(record)
示例#11
0
 def handle(self):
     """
     Handle multiple requests - each expected to be a 4-byte length,
     followed by the LogRecord in pickle format. Logs the record
     according to whatever policy is configured locally.
     """
     global logger_cache
     while True:
         # Read the 4-byte big-endian length prefix; a short read means
         # the peer closed the connection.
         chunk = self.connection.recv(4)
         if len(chunk) < 4:
             break
         slen = struct.unpack(">L", chunk)[0]
         # Keep reading until the full pickled record has arrived.
         chunk = self.connection.recv(slen)
         while len(chunk) < slen:
             chunk = chunk + self.connection.recv(slen - len(chunk))
         # NOTE(review): pickle.loads on network data is only safe when the
         # sender is trusted — confirm clients are within the trust boundary.
         obj = pickle.loads(chunk)
         record = logging.makeLogRecord(obj)
         # if a name is specified, we use the named logger rather than the one
         # implied by the record.
         if self.server.logname is not None:
             name = self.server.logname
         else:
             name = record.name
         if not name:
             filename = logs_path.join("sprout.log")
         else:
             # Map the dotted logger name onto a directory tree, e.g.
             # "a.b.c" -> logs_path/a/b/c.log, creating directories on the way
             # (the final component carries the ".log" suffix, so no mkdir).
             fields = name.split(".")
             fields[-1] += ".log"
             filename = logs_path
             for field in fields:
                 filename = filename.join(field)
                 if not field.endswith(".log"):
                     with global_fs_lock:
                         if not filename.exists():
                             filename.mkdir()
         filename = filename.strpath
         # Look up (or lazily create) the per-file logger and its write lock.
         with logger_cache_lock:
             if filename in logger_cache:
                 logger, lock = logger_cache[filename]
             else:
                 logger = create_logger(
                     name, filename=filename, max_file_size=MAX_FILE_SIZE, max_backups=MAX_BACKUPS
                 )
                 lock = Lock()
                 logger_cache[filename] = (logger, lock)
         # N.B. EVERY record gets logged. This is because Logger.handle
         # is normally called AFTER logger-level filtering. If you want
         # to do filtering, do it at the client end to save wasting
         # cycles and network bandwidth!
         with lock:
             logger.handle(record)
示例#12
0
    def __init__(self, game_cls, rewards=None):
        """Create an environment given a game class.

        Args:
            game_cls (cls of games.game.Game)
            rewards (dict): @see Environment.rewards
        """
        self.log = create_logger(self.__class__.__name__, LOG_LEVEL)
        # Start from the class-level reward defaults, overlay caller overrides.
        self.rewards = dict(Environment.rewards)
        if rewards:
            self.rewards.update(rewards)
        # Lazy %-style args: the message is only formatted if INFO is enabled
        # (the original pre-formatted with % before calling log.info).
        self.log.info("Playing %s with %s.", game_cls.__name__, self.rewards)
        self.game_cls = game_cls
        self.game = game_cls()
示例#13
0
    def start_test(self, artifact_path, test_name, test_location, slaveid):
        """Begin artifact collection for a test on the given slave.

        Removes any stale per-test cfme.log, attaches a fresh logger to the
        new test entry, and returns the artifact mapping for the test ident.
        Refuses to start if the slave already has a test in progress.
        """
        if not slaveid:
            slaveid = "Master"
        test_ident = "{}/{}".format(test_location, test_name)
        if slaveid in self.tests:
            if self.tests[slaveid].in_progress:
                # print() form is valid on both Python 2 and Python 3
                # (the original bare print statement is a SyntaxError on 3).
                print("Test already running, can't start another, logger")
                return None
        self.tests[slaveid] = self.Test(test_ident)
        self.tests[slaveid].in_progress = True
        artifacts = []
        os_filename = self.ident + "-" + "cfme.log"
        os_filename = os.path.join(artifact_path, os_filename)
        # Start from a clean log file for this test.
        if os.path.isfile(os_filename):
            os.remove(os_filename)
        artifacts.append(os_filename)
        self.tests[slaveid].logger = create_logger(self.ident + test_name, os_filename)
        self.tests[slaveid].logger.setLevel(self.level)

        return None, {'artifacts': {test_ident: {'files': {self.ident: artifacts}}}}
示例#14
0
from utils.conf import docker as docker_conf
from utils.appliance import Appliance
from utils.trackerbot import api
from utils.log import create_logger
from slumber.exceptions import HttpClientError

# GitHub credentials/coordinates from the docker config; the token is used
# for the Authorization header of API requests.
token = docker_conf['gh_token']
owner = docker_conf['gh_owner']
repo = docker_conf['gh_repo']

# Trackerbot API client.
tapi = api()

# presumably caps the number of concurrent worker containers — verify usage
CONT_LIMIT = docker_conf['workers']
DEBUG = docker_conf.get('debug', False)

# Module-level logger writing to prt.log.
logger = create_logger('check_prs', 'prt.log')


def perform_request(url):
    """ Simple function to assist in performing GET requests from github

    Runs if there is a token, else just spits out a blank dict

    Args:
        url: The url to process, which is anything after the "...repos/"

    """
    out = {}
    if token:
        headers = {'Authorization': 'token {}'.format(token)}
        full_url = "https://api.github.com/repos/{}/{}/{}".format(owner, repo, url)
示例#15
0
about the collector.
"""
import pytest
import signal
import subprocess
import time

from cfme.configure import configuration
from utils.conf import env
from utils.log import create_logger
from utils.net import random_port, my_ip_address, net_check_remote
from utils.path import scripts_path
from utils.smtp_collector_client import SMTPCollectorClient


# Module-level logger used by the e-mail collector fixtures below.
logger = create_logger('emails')


@pytest.yield_fixture(scope="session")
def _smtp_test_session(request):
    """Fixture, which prepares the appliance for e-mail capturing tests

    Returns: :py:class:`util.smtp_collector_client.SMTPCollectorClient` instance.
    """
    logger.info("Preparing start for e-mail collector")
    ports = env.get("mail_collector", {}).get("ports", {})
    mail_server_port = ports.get("smtp", None) or random_port()
    mail_query_port = ports.get("json", None) or random_port()
    my_ip = my_ip_address()
    logger.info("Mind that it needs ports %d and %d open" % (mail_query_port, mail_server_port))
    smtp_conf = configuration.SMTPSettings(
示例#16
0
 def log(self):
     """Return a logger named "rest"."""
     rest_logger = create_logger("rest")
     return rest_logger
示例#17
0
"""
import pytest
import signal
import subprocess
import time

from cfme.configure import configuration
from fixtures.artifactor_plugin import art_client, get_test_idents
from utils.conf import env
from utils.log import create_logger
from utils.net import random_port, my_ip_address, net_check_remote
from utils.path import scripts_path
from utils.smtp_collector_client import SMTPCollectorClient


# Module-level logger used by the e-mail collector fixtures below.
logger = create_logger("emails")


@pytest.fixture(scope="function")
def smtp_test(request):
    """Fixture, which prepares the appliance for e-mail capturing tests

    Returns: :py:class:`util.smtp_collector_client.SMTPCollectorClient` instance.
    """
    logger.info("Preparing start for e-mail collector")
    ports = env.get("mail_collector", {}).get("ports", {})
    mail_server_port = ports.get("smtp", None) or random_port()
    mail_query_port = ports.get("json", None) or random_port()
    my_ip = my_ip_address()
    logger.info("Mind that it needs ports %s and %s open", mail_query_port, mail_server_port)
    smtp_conf = configuration.SMTPSettings(host=my_ip, port=mail_server_port, auth="none")
示例#18
0
File: main.py  Project: GauravG8/snowball
def main():
    """Run the Snowball bootstrapping loop.

    Alternates a pattern-extraction phase and a tuple-extraction phase for a
    configured number of iterations, appending accepted patterns and tuples
    to their output files as it goes.
    """
    # print() form is valid on both Python 2 and Python 3
    # (the original bare print statement is a SyntaxError on 3).
    print("Running . . .")

    # initialize seeds/patterns list
    seeds = []
    snowball_patterns = []
    seed_dict = io.parse_seed_file(config.SNOWBALL_SEEDS_FILE)
    tuples = []

    # output tuples/patterns files + main logger
    tuples_f = open(config.SNOWBALL_TUPLES_FILE, 'a')
    patterns_f = open(config.SNOWBALL_PATTERNS_FILE, 'a')
    logger = log.create_logger("snowball", "snowball.log")

    # partition sentence search space by iteration; // keeps the per-iteration
    # counts integral (matches Python 2's int / int floor behavior), and the
    # remainder is tacked onto the last slice
    sentence_count = config.SNOWBALL_SENTENCE_CAP
    sentences_per_iter = sentence_count // config.SNOWBALL_NUM_ITERATIONS
    remainder = sentence_count % config.SNOWBALL_NUM_ITERATIONS
    counts = [sentences_per_iter] * config.SNOWBALL_NUM_ITERATIONS
    counts[-1] += remainder  # tack on remainder

    # create initial seed and tuple sets (seed tuples start at confidence 1.0)
    for (subj, obj) in seed_dict['pairs']:
        tup = classes.CandidateTuple(seed_dict['rel'],
                                     subj,
                                     obj,
                                     seed_dict['subj_tag'],
                                     seed_dict['obj_tag'],
                                     1.0)
        seeds.append(tup)
        tuples.append(tup)

    # some info on seeds and relation settings
    logger.info("Relation: %r, Subject tag: %r, Object Tag: %r",
                seed_dict['rel'],
                seed_dict['subj_tag'],
                seed_dict['obj_tag'])
    logger.info("Seeds: %r", seed_dict['pairs'])

    # begin (range instead of Python-2-only xrange)
    for i in range(config.SNOWBALL_NUM_ITERATIONS):
        logger.info("Beginning iteration %r", i + 1)

        raw_patterns = []
        candidate_tuples = {tup: {'matches': [],
                                  'raw_patterns': []} for tup in seeds}

        logger.info("PATTERN EXTRACTION PHASE")

        # retrieve sentences / extract raw patterns
        logger.info("Retrieving sentences and extracting raw patterns . . .")
        for tup in seeds:
            count = es.count_sentences_containing(tup)
            hits = es.get_sentences_containing(tup, 0, count)

            for hit in hits:
                source = hit['_source']
                sent = classes.Sentence(source['id'],
                                        source['index'],
                                        source['tokens'],
                                        source['tagged_tokens'])
                raw_patterns.extend(sent.extract_raw_patterns(tup))

        logger.info("Number of raw patterns: %d", len(raw_patterns))

        # cluster raw patterns
        logger.info("Clustering raw patterns . . .")
        clusterer = classes.SinglePassClusteringAlgorithm(raw_patterns,
                                                          config.SNOWBALL_MIN_PATTERN_SIMILARITY)
        clusterer.prepare()
        clusterer.cluster()

        new_snowball_patterns = clusterer.get_snowball_patterns()

        if len(new_snowball_patterns) == 0:
            logger.info("No new patterns. Prematurely exiting on iteration %d", i + 1)
            break

        snowball_patterns.extend(new_snowball_patterns)

        logger.info("Total number of patterns: %d", len(snowball_patterns))

        # fix: this writes to the patterns file, so log the patterns file
        # (was erroneously logging SNOWBALL_TUPLES_FILE)
        logger.info("Writing new patterns to: %r", config.SNOWBALL_PATTERNS_FILE)
        for pat in new_snowball_patterns:
            io.write_line(patterns_f, pat)

        logger.info("TUPLE EXTRACTION PHASE")

        logger.info("Searching through sentences . . .")

        # retrieve sentences one at a time to avoid blowing up memory
        for j in range(counts[i]):
            from_offset = j + i * sentences_per_iter
            source = es.get_sentences_with_tags(seed_dict['subj_tag'],
                                                seed_dict['obj_tag'],
                                                from_offset,
                                                1)[0]['_source']
            sent = classes.Sentence(source['id'],
                                    source['index'],
                                    source['tokens'],
                                    source['tagged_tokens'])
            candidates = sent.extract_candidate_tuples(seed_dict['rel'],
                                                       seed_dict['subj_tag'],
                                                       seed_dict['obj_tag'])

            # score each candidate against every pattern; keep the best match
            for (candidate, raw_pattern) in candidates:
                best_similarity = 0.0
                best_pattern = None

                for sb_pattern in snowball_patterns:
                    similarity = sb_pattern.match(raw_pattern)

                    if similarity >= config.SNOWBALL_MIN_PATTERN_SIMILARITY:
                        sb_pattern.update_confidence(candidate, seeds)

                        if similarity >= best_similarity:
                            best_similarity = similarity
                            best_pattern = sb_pattern

                if best_similarity >= config.SNOWBALL_MIN_PATTERN_SIMILARITY:
                    if candidate not in candidate_tuples:
                        candidate_tuples[candidate] = {'matches': [],
                                                       'raw_patterns': []}
                    candidate_tuples[candidate]['matches'].append((best_similarity,
                                                                   best_pattern))
                    candidate_tuples[candidate]['raw_patterns'].append(raw_pattern)

        # list() so this is a real list on Python 3 as well (dict view there)
        new_tuples = list(candidate_tuples.keys())

        logger.info("Number of candidate tuples: %d", len(new_tuples))

        for new_tuple in new_tuples:
            new_tuple.update_confidence(candidate_tuples[new_tuple]['matches'], snowball_patterns)
            new_tuple.add_patterns(candidate_tuples[new_tuple]['raw_patterns'])

        # only sufficiently confident tuples seed the next iteration
        seeds = [tup for tup in new_tuples
                 if tup.confidence() >= config.SNOWBALL_MIN_TUPLE_CONFIDENCE]

        if len(seeds) == 0:
            logger.info("No new seeds. Prematurely exiting on iteration %d", i + 1)
            break

        tuples.extend(seeds)  # store everything

        logger.info("Number of seed tuples: %d", len(seeds))

        logger.info("Total tuples so far (roughly): %d", len(tuples))

        logger.info("Ending iteration %d", i + 1)

    logger.info("Writing tuples to: %r", config.SNOWBALL_TUPLES_FILE)
    for tup in tuples:
        io.write_line(tuples_f, tup)

    # close files
    tuples_f.close()
    patterns_f.close()

    print("Done!")
示例#19
0
import pandas as pd
from sklearn.model_selection import StratifiedKFold, train_test_split
from sklearn.utils import shuffle
from sklearn.utils.class_weight import compute_class_weight
from tabulate import tabulate
import torch
from torch import nn

from activity_data_labeler import (LABEL_DAILY, LABEL_ITEMS,
                                   LABEL_ITEMS_INDEX_DICT, LABEL_OTHER_SPORTS)
from utils import log, plotting
from utils.common import ewma, load_dataset
from utils.model_utils import GeneralModelPredictor
from utils.trainer import data_normalize, normalize_data, training

# Module-level logger for this research script.
logger = log.create_logger('ActivityResearch', level=logging.DEBUG)

# Other type from type name to int label
LABEL_OTHER = LABEL_DAILY + LABEL_OTHER_SPORTS
# Activity-name groups; spellings like 'RuningInDoor' presumably match the
# dataset's label strings exactly — verify against LABEL_ITEMS before "fixing".
LABEL_BRISKING = ['BriskWalkInDoor', 'BriskWalkOutSide', 'SlowWalk']
LABEL_RUNNING = ['RuningInDoor', 'RuningOutSide']
LABEL_BIKING = ['BikingOutSide']
LABEL_ROWING = ['RowingMachine']
LABEL_ELLIPTICAL = ['EllipticalMachine']

# All labels used for training, and their indices in the master label list.
# Note: .get() yields None for any name missing from LABEL_ITEMS_INDEX_DICT.
LABEL_IN_USE = LABEL_OTHER + LABEL_BRISKING + LABEL_RUNNING + LABEL_BIKING + LABEL_ROWING + LABEL_ELLIPTICAL
LABEL_IN_USE_INDEX = [
    LABEL_ITEMS_INDEX_DICT.get(name) for name in LABEL_IN_USE
]

# Map the origin index to activity type for model training
示例#20
0
def logger():
    """Return a logger named "sprout"."""
    sprout_logger = create_logger("sprout")
    return sprout_logger
def main(args):

    with open("./configs/{}/{}_{}_{}.yaml".format(args.net, args.dataset,
                                                  args.backbone,
                                                  args.mode)) as fp:
        cfg = yaml.safe_load(fp)

    NET_ARGS = cfg['NET_ARGS']
    DATA_ARGS = cfg['DATA_ARGS']
    EXP_ARGS = cfg['EXP_ARGS']

    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'

    model_dir = os.path.join('./saved_models/', args.dataset, args.backbone,
                             args.net, args.mode)

    makedir(model_dir)

    log, logclose = create_logger(log_filename=os.path.join(
        model_dir, 'train_logger_{}.txt'.format(
            datetime.datetime.now().strftime("%H:%M:%S"))))

    img_dir = os.path.join(model_dir, 'img')
    makedir(img_dir)
    weight_matrix_filename = 'outputL_weights'
    prototype_img_filename_prefix = 'prototype-img'
    prototype_self_act_filename_prefix = 'prototype-self-act'
    proto_bound_boxes_filename_prefix = 'bb'
    log(pformat(cfg))

    # ---------------------------------------- Get DataLoaders ----------------------------------------------

    normalize = transforms.Normalize(mean=NET_ARGS['mean'],
                                     std=NET_ARGS['std'])
    train_transforms = transforms.Compose([
        transforms.Resize(size=(DATA_ARGS['img_size'], DATA_ARGS['img_size'])),
        transforms.ToTensor(),
        normalize,
    ])

    train_dataset = datasets.ImageFolder(DATA_ARGS['train_dir'],
                                         train_transforms)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=EXP_ARGS['train_batch_size'],
        shuffle=True,
        num_workers=4,
        pin_memory=False)

    train_push_dataset = datasets.ImageFolder(
        DATA_ARGS['train_push_dir'],
        transforms.Compose([
            transforms.Resize(size=(DATA_ARGS['img_size'],
                                    DATA_ARGS['img_size'])),
            transforms.ToTensor(),
        ]))

    train_push_loader = torch.utils.data.DataLoader(
        train_push_dataset,
        batch_size=EXP_ARGS['train_push_batch_size'],
        shuffle=False,
        num_workers=4,
        pin_memory=False)

    test_dataset = datasets.ImageFolder(
        DATA_ARGS['test_dir'],
        transforms.Compose([
            transforms.Resize(size=(DATA_ARGS['img_size'],
                                    DATA_ARGS['img_size'])),
            transforms.ToTensor(),
            normalize,
        ]))

    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=EXP_ARGS['test_batch_size'],
        shuffle=False,
        num_workers=4,
        pin_memory=False)

    log('training set size: {0}'.format(len(train_loader.dataset)))
    log('push set size: {0}'.format(len(train_push_loader.dataset)))
    log('test set size: {0}'.format(len(test_loader.dataset)))
    log('batch size: {0}'.format(EXP_ARGS['train_batch_size']))

    # ------------------------------------ Model and Optimizer ----------------------------------------------

    ppnet = model_AttProto.construct_PPNet(
        base_architecture=NET_ARGS['base_architecture'],
        pretrained=True,
        img_size=DATA_ARGS['img_size'],
        prototype_shape=NET_ARGS['prototype_shape'],
        num_classes=DATA_ARGS['num_classes'],
        prototype_activation_function=NET_ARGS[
            'prototype_activation_function'],
        add_on_layers_type=NET_ARGS['add_on_layers_type'],
        att_version=NET_ARGS['ATT_VERSION'])

    ppnet = ppnet.cuda()
    ppnet_multi = torch.nn.DataParallel(ppnet)
    class_specific = True

    if EXP_ARGS['RESUME']['iS_RESUME']:
        ppnet = torch.load(EXP_ARGS['RESUME']['PATH'])
        log(" Resumed from model: {}".format(EXP_ARGS['RESUME']['PATH']))
        ppnet_multi = torch.nn.DataParallel(ppnet)
        accu = tnt.test(model=ppnet_multi,
                        dataloader=test_loader,
                        class_specific=True,
                        log=log,
                        EXP_ARGS=EXP_ARGS)
        log("\nInit Accuracy {:.2f} \n\n".format(accu))

    ppnet_multi = torch.nn.DataParallel(ppnet)

    warm_optimizer_lrs = EXP_ARGS['OPTIMIZER']['warm_optimizer_lrs']
    warm_optimizer_specs = [
        {
            'params': ppnet.add_on_layers.parameters(),
            'lr': warm_optimizer_lrs['add_on_layers'],
            'weight_decay': 1e-3
        },
        {
            'params': ppnet.prototype_vectors,
            'lr': warm_optimizer_lrs['prototype_vectors']
        },
        {
            'params': ppnet.att_layer.parameters(),
            'lr': warm_optimizer_lrs['att_layer'],
            'weight_decay': 1e-3
        },
    ]
    warm_optimizer = torch.optim.Adam(warm_optimizer_specs)

    joint_optimizer_lrs = EXP_ARGS['OPTIMIZER']['joint_optimizer_lrs']
    joint_optimizer_specs = [{
        'params': ppnet.features.parameters(),
        'lr': joint_optimizer_lrs['features'],
        'weight_decay': 1e-3
    }, {
        'params': ppnet.add_on_layers.parameters(),
        'lr': joint_optimizer_lrs['add_on_layers'],
        'weight_decay': 1e-3
    }, {
        'params': ppnet.prototype_vectors,
        'lr': joint_optimizer_lrs['prototype_vectors']
    }, {
        'params': ppnet.att_layer.parameters(),
        'lr': joint_optimizer_lrs['att_layer'],
        'weight_decay': 1e-3
    }]
    joint_optimizer = torch.optim.Adam(joint_optimizer_specs)

    joint_lr_scheduler = torch.optim.lr_scheduler.StepLR(
        joint_optimizer,
        step_size=int(joint_optimizer_lrs['joint_lr_step_size']),
        gamma=0.1)

    push_epochs = [
        i for i in range(EXP_ARGS['num_train_epochs']) if i % 10 == 0
    ]

    log('\n\n------------------------ Start Training ----------------------------\n\n'
        )

    max_acc = 0.0
    max_acc_epoch = 0
    max_acc_iter = 0
    target_accu = 0.1

    for epoch in range(EXP_ARGS['start_epoch'], EXP_ARGS['num_train_epochs']):

        log('------------------------- Epoch: {}  -------------------------------------'
            .format(epoch))

        if epoch < EXP_ARGS['num_warm_epochs']:
            tnt.warm_only(model=ppnet_multi, log=log)
            _ = tnt.train(model=ppnet_multi,
                          dataloader=train_loader,
                          optimizer=warm_optimizer,
                          class_specific=class_specific,
                          coefs=EXP_ARGS['LOSS']['loss_coefs_warm'],
                          log=log,
                          EXP_ARGS=EXP_ARGS)
        else:
            tnt.joint(model=ppnet_multi, log=log)
            joint_lr_scheduler.step()

            _ = tnt.train(model=ppnet_multi,
                          dataloader=train_loader,
                          optimizer=joint_optimizer,
                          class_specific=class_specific,
                          coefs=EXP_ARGS['LOSS']['loss_coefs_joint'],
                          log=log,
                          EXP_ARGS=EXP_ARGS)

        accu = tnt.test(model=ppnet_multi,
                        dataloader=test_loader,
                        class_specific=class_specific,
                        log=log,
                        EXP_ARGS=EXP_ARGS)

        if accu > max_acc:
            max_acc = accu
            max_acc_iter = 0
            max_acc_epoch = epoch
            save.save_model_w_condition(model=ppnet,
                                        model_dir=model_dir,
                                        model_name='',
                                        accu=accu,
                                        target_accu=target_accu,
                                        log=log,
                                        best=True,
                                        stage='prepush_{}'.format(epoch))

        log("\nBest Accuracy {:.2f} at epoch {} and iter {}\n\n".format(
            max_acc, max_acc_epoch, max_acc_iter))

        if epoch >= EXP_ARGS['push_start'] and epoch in push_epochs:

            save.save_model_w_condition(model=ppnet,
                                        model_dir=model_dir,
                                        model_name='',
                                        accu=accu,
                                        target_accu=target_accu,
                                        log=log,
                                        best=True,
                                        stage='prepushfinal_{}'.format(epoch))

            log('\n------------------------- Push Prototypes -----------------------------'
                )

            push.push_prototypes(
                train_push_loader,
                prototype_network_parallel=ppnet_multi,
                class_specific=class_specific,
                preprocess_input_function=preprocess_input_function,
                prototype_layer_stride=1,
                root_dir_for_saving_prototypes=img_dir,
                epoch_number=epoch,
                prototype_img_filename_prefix=prototype_img_filename_prefix,
                prototype_self_act_filename_prefix=
                prototype_self_act_filename_prefix,
                proto_bound_boxes_filename_prefix=
                proto_bound_boxes_filename_prefix,
                save_prototype_class_identity=True,
                log=log)

            accu = tnt.test(model=ppnet_multi,
                            dataloader=test_loader,
                            class_specific=class_specific,
                            log=log,
                            EXP_ARGS=EXP_ARGS)

            save.save_model_w_condition(model=ppnet,
                                        model_dir=model_dir,
                                        model_name='',
                                        accu=accu,
                                        target_accu=target_accu,
                                        log=log,
                                        best=True,
                                        stage='push_{}'.format(epoch))

            last_layer_optimizer_specs = [{
                'params':
                ppnet.last_layer.parameters(),
                'lr':
                EXP_ARGS['OPTIMIZER']['last_layer_optimizer_lrs']
                ['last_layer_optimizer_lr']
            }]

            last_layer_optimizer = torch.optim.Adam(last_layer_optimizer_specs)
            last_lr_lr_scheduler = torch.optim.lr_scheduler.StepLR(
                last_layer_optimizer,
                step_size=EXP_ARGS['OPTIMIZER']['last_layer_optimizer_lrs']
                ['last_lr_step_size'],
                gamma=0.1)

            log('\n------------------------- Last Layer Training -----------------------------------'
                )

            if NET_ARGS['prototype_activation_function'] != 'linear':
                tnt.last_only(model=ppnet_multi, log=log)

                max_acc_post, max_acc_post_iter, max_acc_post_epoch = 0, 0, epoch

                for i in range(
                        EXP_ARGS['OPTIMIZER']['last_layer_optimizer_lrs']
                    ['last_layer_optimizer_iters']):
                    log('Last layer optimization, Iteration:  {0}'.format(i))
                    _ = tnt.train(model=ppnet_multi,
                                  dataloader=train_loader,
                                  optimizer=last_layer_optimizer,
                                  class_specific=class_specific,
                                  coefs=EXP_ARGS['LOSS']['loss_coefs_joint'],
                                  log=log,
                                  EXP_ARGS=EXP_ARGS)

                    last_lr_lr_scheduler.step()
                    accu = tnt.test(model=ppnet_multi,
                                    dataloader=test_loader,
                                    class_specific=class_specific,
                                    log=log,
                                    EXP_ARGS=EXP_ARGS)

                    if accu > max_acc_post:
                        max_acc_post = accu
                        max_acc_post_iter = i
                        max_acc_post_epoch = epoch
                        save.save_model_w_condition(
                            model=ppnet,
                            model_dir=model_dir,
                            model_name='',
                            accu=accu,
                            target_accu=0.70,
                            log=log,
                            best=True,
                            stage='postpush_{}'.format(epoch))

                    log("Best Accuracy - PostPush {:.2f} at epoch {} and iter {}"
                        .format(max_acc_post, max_acc_post_epoch,
                                max_acc_post_iter))

                save.save_model_w_condition(
                    model=ppnet,
                    model_dir=model_dir,
                    model_name='',
                    accu=accu,
                    target_accu=0.70,
                    log=log,
                    best=True,
                    stage='postpushfinal_{}'.format(epoch))

    logclose()
示例#22
0
 def log(self):
     """Return a logger named "rest" (built via the project's create_logger)."""
     rest_logger = create_logger("rest")
     return rest_logger
示例#23
0
# Parse the single CLI option (path to a YAML experiment config) and merge it
# into the global `cfg` object.
parser = argparse.ArgumentParser(description='xxx')
parser.add_argument('--config', type=str, default='cfg/train_birds.yml')
args = parser.parse_args()
cfg_from_file(args.config)

# Restrict training to the GPU(s) named in the config.
os.environ['CUDA_VISIBLE_DEVICES'] = cfg.GPU_ID

# Set directories and logger
# Every run gets a unique, timestamped sample/log directory so experiments
# never overwrite each other.
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
output_dir = 'sample/%s_%s_%s' % (cfg.DATASET_NAME, cfg.CONFIG_NAME, timestamp)
log_dir = os.path.join(
    cfg.LOG_DIR, '%s_%s_%s' % (cfg.DATASET_NAME, cfg.CONFIG_NAME, timestamp))
os.makedirs(log_dir, exist_ok=True)
log_filename = os.path.join(log_dir, 'train.log')
log, logclose = create_logger(log_filename=log_filename)
writer = SummaryWriter(log_dir=log_dir)

# Dump the full config at the top of the log file.
# NOTE(review): opening with 'w+' truncates train.log after create_logger has
# already attached to it — confirm create_logger appends/reopens, otherwise
# earlier log output is lost.
with open(log_filename, 'w+') as logFile:
    pprint.pprint(cfg, logFile)

log('')
log('============================================================================'
    )
log('')
log('Dataset:')
# Training images are resized to a fixed 128x128 before being fed to the model.
image_transform = transforms.Compose([transforms.Resize((128, 128))])

train_dataset = CUBDataset(cfg.DATA_DIR,
                           transform=image_transform,
                           split='train')
# NOTE(review): this import appears after `cfg` has already been used above —
# it would shadow the configured `cfg` object; verify it belongs here.
import config as cfg
示例#25
0
def logger():
    """Return a logger named "sprout_vm_actions"."""
    vm_actions_logger = create_logger("sprout_vm_actions")
    return vm_actions_logger
示例#26
0
def main():
    """Train or evaluate the `dicl_wrapper` optical-flow model.

    Workflow: parse CLI args (module-level ``parser``), optionally merge a
    YAML config into the global ``cfg``, build the save directory and
    logger, construct the train/val dataloaders selected by ``--dataset``,
    optionally restore a pretrained checkpoint, then either run a single
    validation pass (``--evaluate``) or the full training loop, saving the
    best-EPE checkpoint and periodic snapshots.

    Reads and updates the module globals ``args``, ``best_EPE`` and
    ``save_path``.
    """
    global args, best_EPE, save_path
    args = parser.parse_args()

    # Load config file; the config's TAG must match the file's base name so
    # experiment directories remain unambiguous.
    if args.cfg is not None:
        cfg_from_file(args.cfg)
        assert cfg.TAG == os.path.splitext(os.path.basename(
            args.cfg))[0], 'TAG name should be file name'

    # Build save_path, which can be specified by out_dir and exp_dir
    save_path = '{},{}epochs{},b{},lr{}'.format(
        'dicl_wrapper', args.epochs,
        ',epochSize' + str(args.epoch_size) if args.epoch_size > 0 else '',
        args.batch_size, args.lr)

    save_path = os.path.join(args.exp_dir, save_path)
    if args.out_dir is not None:
        outpath = os.path.join(args.out_dir, args.dataset)
    else:
        outpath = args.dataset
    save_path = os.path.join(outpath, save_path)

    # exist_ok=True replaces the racy os.path.exists()/makedirs pair.
    os.makedirs(outpath, exist_ok=True)
    os.makedirs(save_path, exist_ok=True)

    # Create logger
    log_file = os.path.join(save_path, 'log.txt')
    logger = create_logger(log_file)
    logger.info('**********************Start logging**********************')
    logger.info('=> will save everything to {}'.format(save_path))

    # Print settings: every CLI argument value, then the merged config.
    for key in vars(args):
        logger.info(args.__dict__[key])
    save_config_to_file(cfg, logger=logger)
    logger.info(args.pretrained)

    # Set random seed
    # NOTE(review): only the CUDA and NumPy RNGs are seeded; the CPU torch
    # RNG (torch.manual_seed) is left unseeded — confirm that is intended.
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    train_writer = SummaryWriter(os.path.join(save_path, 'train'))
    eval_writer = SummaryWriter(os.path.join(save_path, 'eval'))

    logger.info("=> fetching img pairs in '{}'".format(args.data))

    ########################## DATALOADER ##########################
    if args.dataset == 'flying_chairs':
        if cfg.SIMPLE_AUG:
            train_dataset = datasets.FlyingChairs_SimpleAug(args,
                                                            root=args.data)
            test_dataset = datasets.FlyingChairs_SimpleAug(args,
                                                           root=args.data,
                                                           mode='val')
        else:
            train_dataset = datasets.FlyingChairs(args,
                                                  image_size=cfg.CROP_SIZE,
                                                  root=args.data)
            test_dataset = datasets.FlyingChairs(args,
                                                 root=args.data,
                                                 mode='val',
                                                 do_augument=False)
    elif args.dataset == 'flying_things':
        train_dataset = datasets.SceneFlow(args,
                                           image_size=cfg.CROP_SIZE,
                                           root=args.data,
                                           dstype='frames_cleanpass',
                                           mode='train')
        test_dataset = datasets.SceneFlow(args,
                                          image_size=cfg.CROP_SIZE,
                                          root=args.data,
                                          dstype='frames_cleanpass',
                                          mode='val',
                                          do_augument=False)
    elif args.dataset == 'mpi_sintel_clean' or args.dataset == 'mpi_sintel_final':
        # Sintel trains on clean+final jointly; validation pass selects one.
        clean_dataset = datasets.MpiSintel(args,
                                           image_size=cfg.CROP_SIZE,
                                           root=args.data,
                                           dstype='clean')
        final_dataset = datasets.MpiSintel(args,
                                           image_size=cfg.CROP_SIZE,
                                           root=args.data,
                                           dstype='final')
        train_dataset = torch.utils.data.ConcatDataset([clean_dataset] +
                                                       [final_dataset])
        if args.dataset == 'mpi_sintel_final':
            test_dataset = datasets.MpiSintel(args,
                                              do_augument=False,
                                              image_size=None,
                                              root=args.data,
                                              dstype='final')
        else:
            test_dataset = datasets.MpiSintel(args,
                                              do_augument=False,
                                              image_size=None,
                                              root=args.data,
                                              dstype='clean')
    elif args.dataset == 'KITTI':
        train_dataset = datasets.KITTI(args,
                                       image_size=cfg.CROP_SIZE,
                                       root=args.data,
                                       is_val=False,
                                       logger=logger)
        # Optionally mix in KITTI-2012 training pairs.
        if args.data_kitti12 is not None:
            train_dataset12 = datasets.KITTI12(args,
                                               image_size=cfg.CROP_SIZE,
                                               root=args.data_kitti12,
                                               is_val=False,
                                               logger=logger)
            train_dataset = torch.utils.data.ConcatDataset([train_dataset] +
                                                           [train_dataset12])
        test_dataset = datasets.KITTI(args,
                                      root=args.data,
                                      do_augument=False,
                                      is_val=True,
                                      do_pad=False)
    else:
        raise NotImplementedError

    logger.info('Training with %d image pairs' % len(train_dataset))

    logger.info('Testing with %d image pairs' % len(test_dataset))

    gpuargs = {'num_workers': args.workers, 'drop_last': cfg.DROP_LAST}
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               pin_memory=True,
                                               shuffle=True,
                                               **gpuargs)

    if 'KITTI' in args.dataset:
        # We set batch size to 1 since KITTI images have different sizes
        val_loader = torch.utils.data.DataLoader(test_dataset,
                                                 batch_size=1,
                                                 num_workers=args.workers,
                                                 pin_memory=True,
                                                 shuffle=False)
    else:
        val_loader = torch.utils.data.DataLoader(test_dataset,
                                                 batch_size=args.batch_size,
                                                 num_workers=args.workers,
                                                 pin_memory=True,
                                                 shuffle=False)

    # create model
    if args.pretrained:
        logger.info("=> using pre-trained model '{}'".format(args.pretrained))
        # The checkpoint either wraps the weights under 'state_dict' or is a
        # bare state dict; both cases are handled when loading below.  (An
        # identity dict-comprehension copy of 'state_dict' was removed here:
        # it rebuilt the mapping with unchanged keys and values.)
        pretrained_dict = torch.load(args.pretrained)

    model = models.__dict__['dicl_wrapper'](None)

    assert (args.solver in ['adam', 'sgd'])
    logger.info('=> setting {} solver'.format(args.solver))

    if args.solver == 'adam':
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.lr,
                                     weight_decay=cfg.WEIGHT_DECAY,
                                     betas=(cfg.MOMENTUM, cfg.BETA))
    elif args.solver == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    weight_decay=cfg.WEIGHT_DECAY,
                                    momentum=cfg.MOMENTUM)

    if args.pretrained:
        # strict=False: tolerate partially matching checkpoints (finetuning).
        if 'state_dict' in pretrained_dict.keys():
            model.load_state_dict(pretrained_dict['state_dict'], strict=False)
        else:
            model.load_state_dict(pretrained_dict, strict=False)

        if args.reuse_optim:
            try:
                optimizer.load_state_dict(pretrained_dict['optimizer_state'])
            except Exception:
                # Best-effort restore.  Narrowed from a bare `except:` so
                # KeyboardInterrupt/SystemExit are no longer swallowed.
                logger.info('do not have optimizer state')
        # Free the checkpoint tensors before training allocations.
        del pretrained_dict
        torch.cuda.empty_cache()

    model = torch.nn.DataParallel(model)

    if torch.cuda.is_available():
        model = model.cuda()

    # Evaluation
    if args.evaluate:
        with torch.no_grad():
            best_EPE = validate(val_loader,
                                model,
                                0,
                                None,
                                eval_writer,
                                logger=logger)
        return

    # Learning rate schedule: halve the LR at each user-supplied milestone.
    milestones = [int(m) for m in args.milestones]

    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=milestones,
                                                     gamma=0.5)

    ###################################### Training  ######################################
    for epoch in range(args.start_epoch, args.epochs):

        # train for one epoch
        train_loss = train(train_loader,
                           model,
                           optimizer,
                           epoch,
                           train_writer,
                           logger=logger)
        scheduler.step()

        train_writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
        train_writer.add_scalar('avg_loss', train_loss, epoch)

        if epoch % args.eval_freq == 0 and not args.no_eval:
            with torch.no_grad():
                # NOTE(review): `output_writers` is not defined in this
                # function (the --evaluate branch above passes None in this
                # position); it must exist at module level — verify.
                EPE = validate(val_loader,
                               model,
                               epoch,
                               output_writers,
                               eval_writer,
                               logger=logger)
            eval_writer.add_scalar('mean_EPE', EPE, epoch)

            # NOTE(review): assumes the module initializes best_EPE to a
            # negative sentinel (e.g. -1) — confirm.
            if best_EPE < 0:
                best_EPE = EPE

            if EPE < best_EPE:
                best_EPE = EPE
                ckpt_best_file = 'checkpoint_best.pth.tar'
                save_checkpoint(
                    {
                        'epoch': epoch + 1,
                        'arch': 'dicl_wrapper',
                        'state_dict': model.module.state_dict(),
                        'optimizer_state': optimizer.state_dict(),
                        'best_EPE': EPE
                    },
                    False,
                    filename=ckpt_best_file)
            logger.info('Epoch: [{0}] Best EPE: {1}'.format(epoch, best_EPE))

        # Skip at least 5 epochs to save memory
        save_freq = max(args.eval_freq, 5)
        if epoch % save_freq == 0:
            ckpt_file = 'checkpoint_' + str(epoch) + '.pth.tar'
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': 'dicl_wrapper',
                    'state_dict': model.module.state_dict(),
                    'optimizer_state': optimizer.state_dict(),
                    'best_EPE': best_EPE
                },
                False,
                filename=ckpt_file)
示例#27
0
from utils.appliance import Appliance
from utils.trackerbot import api
from utils.log import create_logger
from slumber.exceptions import HttpClientError


# GitHub credentials/coordinates for the PR-testing bot, read from docker.yaml.
token = docker_conf["gh_token"]
owner = docker_conf["gh_owner"]
repo = docker_conf["gh_repo"]

# Trackerbot API client used to report results.
tapi = api()

# Maximum number of concurrently running test containers.
CONT_LIMIT = docker_conf["workers"]
DEBUG = docker_conf.get("debug", False)

# Module-wide logger writing to prt.log.
logger = create_logger("check_prs", "prt.log")

# Disable pika logs
logging.getLogger("pika").propagate = False


def send_message_to_bot(msg):

    required_fields = set(["rabbitmq_url", "gh_queue", "gh_channel", "gh_message_type"])
    if not required_fields.issubset(docker_conf.viewkeys()):
        logger.warn("Skipping - docker.yaml doesn't have {}".format(required_fields))
        return

    logger.info("Github PR bot: about to send '{}'".format(msg))
    url = docker_conf["rabbitmq_url"]
    queue = docker_conf["gh_queue"]
示例#28
0
about the collector.
"""
import pytest
import signal
import subprocess
import time

from cfme.configure import configuration
from fixtures.artifactor_plugin import art_client, get_test_idents
from utils.conf import env
from utils.log import create_logger
from utils.net import random_port, my_ip_address, net_check_remote
from utils.path import scripts_path
from utils.smtp_collector_client import SMTPCollectorClient

# Module-level logger ('emails') shared by the e-mail capturing fixtures below.
logger = create_logger('emails')


@pytest.fixture(scope="function")
def smtp_test(request):
    """Fixture, which prepares the appliance for e-mail capturing tests

    Returns: :py:class:`util.smtp_collector_client.SMTPCollectorClient` instance.
    """
    logger.info("Preparing start for e-mail collector")
    ports = env.get("mail_collector", {}).get("ports", {})
    mail_server_port = ports.get("smtp", None) or random_port()
    mail_query_port = ports.get("json", None) or random_port()
    my_ip = my_ip_address()
    logger.info("Mind that it needs ports {} and {} open".format(
        mail_query_port, mail_server_port))
示例#29
0
from enum import Enum, unique
import json
import os
from pathlib import Path

import PySimpleGUI as sg
import click
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

from utils import log
from utils.data_labeler import DataLabeler, merge_labels

# Module-level logger for the labeling tool.
logger = log.create_logger('ActiDataLabeler')

# Short column names used internally for one sensor sample.
SENSOR_DATA_NAMES = ['type', 'ts', 'x', 'y', 'z', 'xe', 'ye', 'ze']
# Column header as it appears in the raw accelerometer CSV files.
SENSOR_DATA_HEADER = [
    'CurrentTimeMillis', 'EventTimestamp(ns)', 'accel_x', 'accel_y', 'accel_z'
]
# The two timestamp columns: wall-clock millis and event timestamp in ns.
CTM_HEADER_NAME = 'CurrentTimeMillis'
TS_HEADER_NAME = 'EventTimestamp(ns)'
TSS = [CTM_HEADER_NAME, TS_HEADER_NAME]

# !! In order to be compatible with historical annotations, please add the type at the end instead of inserting
# Refer to list: https://xiaomi.f.mioffice.cn/docs/dock4H2cyIYjwEGAnMmFH5GOcTc#
LABEL_ITEMS = (
    'Undefined',  # 0
    'Static',  # 1
    'DailyActivity',  # 2, Such as Working/Writing/Chatting
示例#30
0
from utils.conf import docker as docker_conf
from utils.appliance import Appliance
from utils.trackerbot import api
from utils.log import create_logger
from slumber.exceptions import HttpClientError

# GitHub credentials/coordinates for the PR-testing bot, read from docker conf.
token = docker_conf['gh_token']
owner = docker_conf['gh_owner']
repo = docker_conf['gh_repo']

# Trackerbot API client used to report results.
tapi = api()

# Maximum number of concurrently running test containers.
CONT_LIMIT = docker_conf['workers']
DEBUG = docker_conf.get('debug', False)

# Module-wide logger writing to prt.log.
logger = create_logger('check_prs', 'prt.log')


def perform_request(url):
    """ Simple function to assist in performing GET requests from github

    Runs if there is a token, else just spits out a blank dict

    Args:
        url: The url to process, which is anything after the "...repos/"

    """
    out = {}
    if token:
        headers = {'Authorization': 'token {}'.format(token)}
        full_url = "https://api.github.com/repos/{}/{}/{}".format(owner, repo, url)
示例#31
0
    def __init__(self, i, j):
        """Initialize with empty content at position (i, j)."""
        # Per-instance logger named after the concrete class; LOG_LEVEL is a
        # module-level constant.
        self.log = create_logger(self.__class__.__name__, log_level=LOG_LEVEL)
        self.position = Position(i, j)
        self.content = None
示例#32
0
from cached_property import cached_property

from fixtures.artifactor_plugin import art_client, get_test_idents
from fixtures.terminalreporter import reporter
from utils import providers
from utils.conf import env
from utils.db import cfmedb
from utils.datafile import template_env
from utils.events import setup_for_event_testing
from utils.log import create_logger
from utils.net import my_ip_address, random_port
from utils.path import scripts_path
from utils.ssh import SSHClient
from utils.wait import wait_for, TimedOutError

# Module-level logger ('events') shared by the event-testing helpers below.
logger = create_logger('events')


def get_current_time_GMT():
    """ Because SQLite loves GMT.

    Returns:
        datetime() with current GMT time, truncated to whole seconds
        (naive, tzinfo is None) — matching second-precision SQLite storage.
    """
    # Equivalent to the old strftime -> strptime round-trip through
    # time.gmtime(), whose only effect was dropping sub-second precision.
    return datetime.utcnow().replace(microsecond=0)


class HTMLReport(object):
    def __init__(self, test_name, registered_events, all_events):
        self.registered_events = registered_events
        self.test_name = test_name
示例#33
0
def api_logger():
    """Return a logger named "sprout_api"."""
    sprout_api_logger = create_logger("sprout_api")
    return sprout_api_logger