def main():
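    # init_env('1') presumably pins the job to GPU 1 (e.g. via CUDA_VISIBLE_DEVICES)
    # before the model is moved to the device below.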
    init_env('1')
    loaders = make_data_loaders(cfg)
    model = build_model(cfg)
    model = model.cuda()
    task_name = 'base_unet'
    log_dir = os.path.join(cfg.LOG_DIR, task_name)
    cfg.TASK_NAME = task_name
    mkdir(log_dir)
    logger = setup_logger('train', log_dir, filename='train.log')
    logger.info(cfg)
    setup_logger('eval', log_dir, filename='eval.log')  # registered by name for the eval phase
    optimizer, scheduler = make_optimizer(cfg, model)
    metrics = get_metrics(cfg)
    losses = get_losses(cfg)
    train_val(model, loaders, optimizer, scheduler, losses, metrics)
Example #2
def notice_clean():
    global logger
    logger_name = "nebula.notice.cleaner"
    start_time = time.time()
    error_type = None
    job_name = "Clean table notice"
    # init logging and metrics up front so the except/finally blocks can rely on them
    logger = utils.init_env(logger_name)
    notice_clean_recorder = MetricsRecorder("cronjob.notice_clean",
                                            expire=86400 * 60,  # keep for 2 months
                                            interval=300,       # merge in 5-minute buckets
                                            type="sum",
                                            db="nebula.offline")
    try:
        cj = NoticeCleanCronJob()
        notice_clean_recorder.record(1, {"status": "run"})
        cj.start()
        status = "success"
    except Exception as e:
        logger.exception(e)
        status = "fail"
        error_type = str(e)
    finally:
        costs = (time.time() - start_time) / 60.0
        logger.info("Cronjob(%s) started at %s finished with status %s, costing %.2f min.",
                    job_name, start_time, status, costs)
        notice_clean_recorder.record(1, {"status": status,
                                         "error_type": error_type,
                                         "costs": costs})
        # wait for the metrics to be flushed.
        gevent.sleep(60)
def notice_stat(timestamp):
    global logger
    
    logger = utils.init_env("database.test.notice_stat")
    # gen test notice
    notices = []
    with codecs.open("tests/notice_data_tem.json", encoding='utf-8') as f:
        dt = json.load(f)
        if not timestamp:
            timestamp = utils.get_last_hour()
            
        # parse the hour-formatted timestamp into the epoch-seconds working window
        t = datetime.strptime(timestamp, settings.LogPath_Format)
        settings.Working_TS = time.mktime((t.year, t.month, t.day, t.hour, t.minute, t.second, 0, 0, 0))
        while len(notices) < settings.Test_Count:
            # the same template dicts are appended repeatedly, which is fine for
            # bulk-inserting identical rows
            for tpl in dt:
                tpl["last_modified"] = tpl["timestamp"] = int(settings.Working_TS) * 1000
                tpl["id"] = None
                notices.append(tpl)
    # insert test notice data into mysql
    conn = settings.db.connect()
    tran = conn.begin()  # begin outside the try so rollback is always defined
    try:
        conn.execute(Notice.__table__.insert(), notices)
        tran.commit()
    except Exception as e:
        tran.rollback()
        logger.exception(e)
        sys.exit(-1)
    
    # run cron job
    cmd = ["python cron_jobs.py ",]
    if is_debug:
        cmd.append("--debug ")
    cmd.append("notice_stat ")
    if timestamp:
        cmd.append("--timestamp %s " % timestamp)
    logger.debug("input cmd: %s" % ''.join(cmd))
    p = Popen(''.join(cmd), shell=True, stdout=PIPE, stderr=PIPE)
    sout, serr = p.communicate()
#    print p.returncode, sout, serr
    if p.returncode != 0:
        logger.error(serr)
    print sout
Example #4
def notice_stat(timestamp):
    global logger
    logger_name = "nebula.notice_stat.writer"
    job_name = "generate notice stat"
    error_type = None
    start_time = time.time()
    # Determine the timestamp for the notice -> notice_stat conversion; if none is
    # given, default to converting the notices from the last hour.
    if not timestamp:
        timestamp = utils.get_last_hour()
    t = datetime.strptime(timestamp, settings.LogPath_Format)
    settings.Working_TS = time.mktime((t.year, t.month, t.day, t.hour, t.minute, t.second, 0, 0, 0))
    settings.Working_DAY = int(time.mktime((t.year, t.month, t.day, 0, 0, 0, 0, 0, 0)))
    click.echo(u"所使用的工作小时的时间戳是:%s, 既:%s" %
               (settings.Working_TS, datetime.fromtimestamp(settings.Working_TS)))
    click.echo(u"所处的日期是%s, 既:%s" %
               (settings.Working_DAY, datetime.fromtimestamp(settings.Working_DAY*1.0)))
    
    # init logging and metrics up front so the except/finally blocks can rely on them
    logger = utils.init_env(logger_name)
    notice_stat_recorder = MetricsRecorder("cronjob.notice_stat",
                                           expire=86400 * 60,  # keep for 2 months
                                           interval=300,       # merge in 5-minute buckets
                                           type="sum",
                                           db="nebula.offline")
    try:
        utils.get_strategies_weigh()
        cj = NoticeStatCronJob()
        notice_stat_recorder.record(1, {"status": "run", "workingts": settings.Working_TS})
        cj.start()
        status = "success"
    except Exception as e:
        logger.exception(e)  # .exception() already logs the full traceback
        status = "fail"
        error_type = str(e)
    finally:
        costs = (time.time() - start_time) / 60.0
        logger.info("Cronjob(%s) working ts: %s finished with status %s, costing %.2f min.",
                    job_name, settings.Working_TS, status, costs)
        notice_stat_recorder.record(1, {"status": status,
                                        "workingts": settings.Working_TS,
                                        "error_type": error_type})
        # wait for the metrics to be flushed.
        gevent.sleep(60)
Example #5
import os
import warnings

import numpy as np

from utils import init_env
init_env('7')
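# NOTE: init_env('7') runs before the hpi star-import, presumably so the GPU
# selection takes effect before torch initializes CUDA.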
from hpi import *
warnings.filterwarnings('ignore')
train_val_names = list({f[:36] for f in os.listdir(cfg.train_dir)})
test_names = list({f[:36] for f in os.listdir(cfg.test_dir)})
train_names, val_names = train_test_split(train_val_names,
                                          test_size=0.1,
                                          random_state=42)
batch_size = cfg.batch_size
target_size = 512
img_ds = get_data(train_names,
                  val_names,
                  test_names,
                  target_size,
                  batch_size,
                  n_workers=5)
learner = ConvLearner.pretrained(dpn92, img_ds, ps=[0.5])  # use dropout 50%
learner.opt_fn = optim.Adam
learner.clip = 1.0
learner.crit = FocalLoss()
learner.metrics = [acc]
print(learner.summary())
lr = 5e-4
learner.fit(lr, 1)
learner.unfreeze()
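# fastai-style differential learning rates: the earlier (pretrained) layer groups
# train at 1/10 and 1/3 of the head's rate.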
lrs = np.array([lr / 10, lr / 3, lr])
learner.fit(lrs / 4,
            4,
            cycle_len=2,
            use_clr=(10, 20))
Example #6
def notice():
    global logger
    logger = utils.init_env("nebula.notice.writer")
    server = NoticeServer()
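    # start_server presumably blocks here, running the notice writer service loop.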
    start_server(server)
Example #7
def incident():
    global logger
    logger = utils.init_env("nebula.incident.writer")
    server = IncidentServer()
    start_server(server)
Example #8
def incident():
    global logger
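    # only_babel=True is presumably a lighter init that only wires up the Babel
    # messaging client rather than the full service environment.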
    logger = utils.init_env("test.babel_client.incident", only_babel=True)
    babel_client = get_incident_notify_client()
    c = IncidentClient(babel_client)
    c.run()
Example #9
    model = get_model()['dpn92']().cuda()
    # criterion = get_loss()['bce'].cuda()
    # optimizer = optim.SGD(model.parameters(), lr=cfg.lr, momentum=0.9, weight_decay=1e-4)
    # scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.35)
    # model = train(task_name, model, optimizer, criterion, scheduler, train_loader, val_loader, log=log)
    submission_best_loss(task_name, model, test_loader, log=log)


def base_dpn92_800_kfold(k=5, n_select=0):
    task_name = "dpn92_8_KF" + str(n_select)
    makedir(os.path.join(cfg.log_dir, task_name))
    log = Logger(os.path.join(cfg.log_dir, task_name + '_log.txt'), mode="a")
    log("\n\n" + '-' * 51 +
        "[START %s]" % datetime.now().strftime('%Y-%m-%d %H:%M:%S') +
        "-" * 51 + "\n\n")
    print(cfg, file=log)
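    # loaders for fold n_select of the k folds, with extra data included and
    # images resized to 800x800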
    train_loader, val_loader, test_loader = get_kfold_dataloader(
        k, n_select=n_select, use_extra=True, target_shape=(800, 800))
    model = get_model()['dpn92']().cuda()
    # criterion = get_loss()['bce'].cuda()
    # optimizer = optim.SGD(model.parameters(), lr=cfg.lr, momentum=0.9, weight_decay=1e-4)
    # scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.35)
    # model = train(task_name, model, optimizer, criterion, scheduler, train_loader, val_loader, log=log)
    submission_best_loss(task_name, model, test_loader, log=log)


if __name__ == "__main__":
    init_env('5')
    makedir(cfg.log_dir)
    makedir(cfg.submit_dir)
    base_dpn92_kfold(5, 0)