Example #1
    def __init__(self, args):
        """Constructor for the Windows service."""
        win32serviceutil.ServiceFramework.__init__(self, args)
        # Event used by SvcStop to signal the service to shut down
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
        # Keep network calls from blocking the service indefinitely
        socket.setdefaulttimeout(60)

        logger.init_logging()
        self.app = None
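For context, a minimal sketch of the service class this constructor would sit in; the class and service names are assumptions, and the run loop is reduced to waiting on the stop event:

import socket

import win32event
import win32service
import win32serviceutil

import logger  # project-local helper (assumed)

class AppService(win32serviceutil.ServiceFramework):
    _svc_name_ = "AppService"            # hypothetical service name
    _svc_display_name_ = "App Service"   # hypothetical display name

    # __init__ as shown above

    def SvcStop(self):
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.hWaitStop)

    def SvcDoRun(self):
        # Block until SvcStop signals the stop event
        win32event.WaitForSingleObject(self.hWaitStop, win32event.INFINITE)

if __name__ == '__main__':
    win32serviceutil.HandleCommandLine(AppService)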
Example #2
import re
import sys

import logger  # project-local logging helpers (assumed)
import sample  # project-local demo module (assumed)


def main():
    ltype = 'basic'
    ext = None
    if len(sys.argv) >= 2:
        ltype = sys.argv[1]

    result = re.match(r'.+\.(.+)$', ltype)  # raw string: avoid invalid-escape warning
    if result:
        ext = result.group(1)

    if ltype == 'basic':
        log = logger.init_basic(fname='simple.log')
    elif ltype == 'simple':
        log = logger.init_logging()
    elif ltype == 'rot':
        log = logger.init_logging(rotate_size=1000)
    elif ltype == 'trot':
        # log = logger.init_logging(rotate_interval=5)
        log = logger.init_logging(rotate_seconds=30)
    elif ltype == 'code':
        log = logger.init_logging(err_file="err.log")
    elif ext == 'yml':
        log = logger.init_logging_yaml(ltype)
    elif ext == 'ini':
        log = logger.init_logging_ini(ltype)
    else:
        print("invalid arg "+ ltype)
        return 1

    print("got here ")
    log.warning("BEGINS")
    log.debug('debug ')

    log.info("some info")

    log.warning("we might have a problem")

    log.error("something bad")

    sample.func1()

    logger.list_handlers()

    print(f"wrote: {logger.log_file}")

    return 0
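The project-local logger module driving this demo is not shown; below is a minimal sketch of what its init_logging could look like, assuming the keyword names used above map onto the stdlib rotating handlers (all names and defaults here are assumptions):

import logging
from logging.handlers import RotatingFileHandler, TimedRotatingFileHandler

log_file = "app.log"  # module-level attribute, as referenced via logger.log_file

def init_logging(rotate_size=None, rotate_seconds=None, err_file=None):
    """Hypothetical sketch: configure the root logger per the demo's keywords."""
    log = logging.getLogger()
    log.setLevel(logging.DEBUG)
    if rotate_size:
        handler = RotatingFileHandler(log_file, maxBytes=rotate_size, backupCount=3)
    elif rotate_seconds:
        handler = TimedRotatingFileHandler(log_file, when='S', interval=rotate_seconds)
    else:
        handler = logging.FileHandler(log_file)
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    log.addHandler(handler)
    if err_file:
        err_handler = logging.FileHandler(err_file)
        err_handler.setLevel(logging.ERROR)  # mirror errors to the separate file
        log.addHandler(err_handler)
    return log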
Example #3
 def __init__(self):
     """
     Initialization
     """
     # Clear out old logs
     clear_log(AIRTEST_LOG)
     # Set the log directory
     set_logdir(AIRTEST_LOG)
     # Initialize logging
     init_logging()
     # Timeout when waiting for UI elements to appear
     self.timeout = ST.FIND_TIMEOUT
     # airtest API
     self.api = api
     self.poco = AndroidUiautomationPoco(use_airtest_input=True, screenshot_each_action=False)
     self.UIObj = UIObjectProxy(poco=self.poco)
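A short usage sketch for the wrapper above (the class name is an assumption, since the snippet omits it):

page = BasePage()                      # hypothetical wrapper class name
page.poco(text="Login").click()        # drive the app through Poco selectors
page.api.snapshot(msg="after login")   # airtest screenshot under AIRTEST_LOG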
Example #4
from aiohttp import web

from utils.logger import init_logging
from logging import info

from db.create_db import init_db

import config
import telegram

if __name__ == '__main__':
    init_logging()
    init_db()
    info('Starting the bot')
    telegram.bot.remove_webhook()
    if config.dev_mode:
        telegram.bot.polling()
    else:
        import webhook
        telegram.bot.set_webhook(
            url=config.webhook_url_base + config.webhook_url_path,
            certificate=open(config.webhook_ssl_cert, 'rb'))  # binary mode for the upload

        info('Starting the webhook')
        web.run_app(
            webhook.hook,
            host=config.webhook_listen,
            port=config.webhook_port,
            ssl_context=webhook.ssl_context,
        )
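The webhook module imported in the non-dev branch is not shown; here is a plausible sketch under the config names used above. webhook_ssl_priv is an assumed extra setting, and the bot is assumed to be a pyTelegramBotAPI TeleBot:

import ssl

from aiohttp import web
import telebot

import config
import telegram

hook = web.Application()

async def handle(request):
    # Hand the raw Telegram update to the bot's normal processing pipeline
    update = telebot.types.Update.de_json(await request.text())
    telegram.bot.process_new_updates([update])
    return web.Response()

hook.router.add_post(config.webhook_url_path, handle)

ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ssl_context.load_cert_chain(config.webhook_ssl_cert, config.webhook_ssl_priv)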
Example #5
import datetime
import glob

import arcpy

import automated_emails  # project-local mail helper (assumed)
import logger            # project-local logging helper (assumed)


def email(x):
    automated_emails.auto_email(["*****@*****.**"],
                                subject="ERROR DETECTED: Recollect Data "
                                "Import/Overwrite/Data Upload "
                                "Script",
                                text=str(x))


now = datetime.datetime.now().strftime('%Y%m%d_%H_Hours_%M_Mins_%S_Sec')
arcpy.env.overwriteOutput = 1
arcpy.Delete_management("in_memory")
out_direct = r"\\Apexgis\GIS\recollect_appdata"
pub_db = r"C:\Users\Jlong\AppData\Roaming\ESRI\Desktop10.5\ArcCatalog\APEXPUBLISHER_jlong.sde"
arcpy.env.workspace = pub_db

log = logger.init_logging(r"\\Apexgis\GIS\recollect_appdata\logfiles",
                          "RecollectDataUpload")
log.info("Initializing Log:%s" % log)
success_msg = "---SUCCESSFUL EXECUTION---"
fail_msg = "---FAILURE---"

try:
    par_csv = None
    if len(glob.glob(
            r'\\Apexgis\GIS\recollect_appdata\csv_download\*.csv')) > 1:
        print "There should only be the most current CSV file in the directory"
        log.warning(
            "More than 1 CSV detected in csv_download network directory")
    else:
        print "Current Recollect Parcels CSV detected"
        par_csv = glob.glob(
            r'\\Apexgis\GIS\recollect_appdata\csv_download\*.csv')[0]
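        # ... the import/overwrite/upload work continues here; the original
        # example is truncated at this point ...

    log.info(success_msg)
except Exception:
    import traceback
    # Hypothetical error path (assumption): log the failure and mail the
    # traceback out through the email() helper defined above.
    log.error(fail_msg)
    email(traceback.format_exc())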
Example #6
import sys
from utils import shell
from utils import logger
from monitor.cluster_monitor import DbClusterMonitor

if __name__ == '__main__':
    logger.init_logging()

    # Retry until the INI config parses successfully
    config_loaded = False
    config = {}
    while not config_loaded:
        config_loaded, config = shell.load_config_ini()

    app = DbClusterMonitor(config)
    sys.exit(app.start())
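shell.load_config_ini is project-local; a minimal sketch matching the (flag, dict) shape the retry loop expects, built on stdlib configparser (the path is an assumption):

import configparser

def load_config_ini(path='config.ini'):
    parser = configparser.ConfigParser()
    if not parser.read(path):  # read() returns the list of files it parsed
        return False, {}
    # Flatten sections into a plain dict of dicts
    return True, {name: dict(parser.items(name)) for name in parser.sections()}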
Example #7
import argparse
import traceback
import concurrent.futures

from backoffice.crawler.cdc import CdcCrawler
from utils.crawler import _NEW_RUMOR, _OLD_RUMOR, _FAILED, fetch_latest_create_date_of_rumor
from utils.logger import init_logging, logger
from utils.settings import Settings
from models.aws.ddb.rumor_model import RumorModel

setting = Settings(_env_file='config/env')
init_logging(setting)

parser = argparse.ArgumentParser()
parser.add_argument("-d",
                    "--date",
                    help="To crawler content from date",
                    default=None,
                    type=str)
parser.add_argument("-u",
                    "--update",
                    help="To update rumor content by re-crawlering",
                    default=False,
                    type=bool,
                    action=argparse.BooleanOptionalAction)
args = parser.parse_args()


def parsing_work(crawler, rumor_info):
    try:
        fetched = False
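        # ... fetch the rumor content and save it here; the original example
        # is truncated at this point ...
    except Exception:
        # Hypothetical error path (assumption): log the traceback for this rumor
        logger.error(traceback.format_exc())


# Plausible driver (a sketch, not the project's actual code): fan parsing_work
# out across a thread pool, which is presumably what concurrent.futures is
# imported for above.
def run_all(crawler, rumor_infos):
    with concurrent.futures.ThreadPoolExecutor(max_workers=8) as pool:
        futures = [pool.submit(parsing_work, crawler, info) for info in rumor_infos]
        concurrent.futures.wait(futures)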
Example #8
import argparse
import json
import os
import time

# Project-local imports (CoNLL03_Dataset, CoNLL03Writer, init_logging and the
# trainer classes) are assumed to be in scope, as in the original module.


def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--root_dir')
    parser.add_argument('--train_path', default='raw/rawtext/train.bioes.txt')
    parser.add_argument('--dev_path', default='raw/rawtext/valid.bioes.txt')
    parser.add_argument('--test_path', default='raw/rawtext/test.bioes.txt')
    parser.add_argument('--embedding_type',
                        choices=['senna', 'glove', 'sskip', 'polyglot'],
                        default='glove')
    parser.add_argument('--embedding_path',
                        default='raw/embedd/glove.6B.100d.txt')

    parser.add_argument('--ngpu', type=int, default=1)
    parser.add_argument('--gpu_id', type=int, default=0)
    parser.add_argument('--load_checkpoint',
                        action='store_true',
                        default=False)
    parser.add_argument('--load_checkpoint_path')
    parser.add_argument('--model_name',
                        choices=['RNNT', 'NCRFT'],
                        default='RNNT')
    parser.add_argument('--checkpoint_path', default='checkpoints')
    parser.add_argument('--tmp_path')
    parser.add_argument('--decode_mode',
                        choices=['greedy', 'beam'],
                        default='greedy')
    parser.add_argument('--loss',
                        choices=['local-joint', 'local-separate',
                                 'global-joint-normal', 'global-joint-logsoftmax',
                                 'global-separate-normal', 'global-separate-logsoftmax'])

    args = parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = "%d" % (args.gpu_id)

    logger, args.root_dir = init_logging('log-%s' % args.model_name,
                                         time.time())

    dataset = CoNLL03_Dataset(args.train_path, [args.dev_path, args.test_path],
                              logger)

    dataset.read_embedding(args.embedding_type, args.embedding_path)

    print("Create Alphabets...")
    alphabet_path = os.path.join(args.root_dir, 'alphabet')
    dataset.create_alphabets(alphabet_path, max_vocabulary_size=50000)

    args.checkpoint_path = os.path.join(args.root_dir, 'checkpoint')
    os.makedirs(args.checkpoint_path)

    args.tmp_path = os.path.join(args.root_dir, 'tmp')
    os.makedirs(args.tmp_path)

    writer = CoNLL03Writer(dataset._word_alphabet, dataset._char_alphabet,
                           dataset._pos_alphabet, dataset._chunk_alphabet,
                           dataset._ner_alphabet)

    with open(os.path.join(args.checkpoint_path, 'args.json'), 'w') as f:
        json.dump(vars(args), f)

    if args.model_name == 'RNNT':
        trainer = RNNT_Trainer(args, dataset, logger, writer)
    elif args.model_name == 'NCRFT':
        trainer = NCRFT_Trainer(args, dataset, logger, writer)

    trainer.train()
Example #9
def run(hps="teeny", port=29500, **kwargs):
    #from jukebox.utils.dist_utils import setup_dist_from_mpi
    from utils.dist_utils import setup_dist_from_mpi
    rank, local_rank, device = setup_dist_from_mpi(port=port)
    hps = setup_hparams(hps, kwargs)
    hps.ngpus = dist.get_world_size()
    hps.argv = " ".join(sys.argv)
    hps.bs_sample = hps.nworkers = hps.bs

    # Setup dataset
    data_processor = DataProcessor(hps)

    # Setup models
    vqvae = make_vqvae(hps, device)
    print_once(f"Parameters VQVAE:{count_parameters(vqvae)}")
    if hps.prior:
        prior = make_prior(hps, vqvae, device)
        print_once(f"Parameters Prior:{count_parameters(prior)}")
        model = prior
    else:
        model = vqvae

    # Setup opt, ema and distributed_model.
    opt, shd, scalar = get_optimizer(model, hps)
    ema = get_ema(model, hps)
    distributed_model = get_ddp(model, hps)

    logger, metrics = init_logging(hps, local_rank, rank)
    logger.iters = model.step

    # Manual tweak ===============================================================
    print("start epoch=", hps.curr_epoch)
    print("end epoch=", hps.epochs)
    print("epoch length=", len(range(hps.curr_epoch, hps.epochs)))
    #==============================================================================

    # Run training, eval, sample
    for epoch in range(hps.curr_epoch, hps.epochs):
        # Manual tweak ===============================================================
        print(datetime.datetime.now(), " epoch=", epoch)
        #==============================================================================
        metrics.reset()
        data_processor.set_epoch(epoch)
        if hps.train:
            train_metrics = train(distributed_model, model, opt, shd, scalar,
                                  ema, logger, metrics, data_processor, hps)
            train_metrics['epoch'] = epoch
            if rank == 0:
                print(
                    'Train', ' '.join([
                        f'{key}: {val:0.4f}'
                        for key, val in train_metrics.items()
                    ]))
            dist.barrier()

        if hps.test:
            if ema: ema.swap()
            test_metrics = evaluate(distributed_model, model, logger, metrics,
                                    data_processor, hps)
            test_metrics['epoch'] = epoch
            if rank == 0:
                print(
                    'Ema', ' '.join([
                        f'{key}: {val:0.4f}'
                        for key, val in test_metrics.items()
                    ]))
            dist.barrier()
            if ema: ema.swap()
        dist.barrier()