Example 1
def main():
    args = get_args()
    config = load_config('recv', str(args.config))
    init_log('hg-agent-forwarder', args.debug)
    shutdown = create_shutdown_event()

    receivers = []
    udp_recv = MetricReceiverUdp(config)
    receivers.append(udp_recv)

    tcp_recv = MetricReceiverTcp(config)
    receivers.append(tcp_recv)

    for receiver in receivers:
        receiver.start()
        logging.info("Started thread for %s", receiver)

    while not shutdown.is_set():
        time.sleep(5)

    for receiver in receivers:
        while receiver.is_alive():
            receiver.shutdown()
            receiver.join(timeout=0.1)
            time.sleep(0.1)
    logging.info("Metric receivers closed.")
Example 2
def train(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir)
    init_log(dirs['log'])
    config_dir = args.config_dir
    copy_file(config_dir, dirs['data'])
    config = configparser.ConfigParser()
    config.read(config_dir)

    # init env
    env = init_env(config['ENV_CONFIG'])
    logging.info('Training: a dim %r, agent dim: %d' % (env.n_a_ls, env.n_agent))

    # init step counter
    total_step = int(config.getfloat('TRAIN_CONFIG', 'total_step'))
    test_step = int(config.getfloat('TRAIN_CONFIG', 'test_interval'))
    log_step = int(config.getfloat('TRAIN_CONFIG', 'log_interval'))
    global_counter = Counter(total_step, test_step, log_step)

    # init centralized or multi agent
    seed = config.getint('ENV_CONFIG', 'seed')
    model = init_agent(env, config['MODEL_CONFIG'], total_step, seed)

    # disable multi-threading for safe SUMO implementation
    summary_writer = tf.summary.FileWriter(dirs['log'])
    trainer = Trainer(env, model, global_counter, summary_writer, output_path=dirs['data'])
    trainer.run()

    # save model
    final_step = global_counter.cur_step
    logging.info('Training: save final model at step %d ...' % final_step)
    model.save(dirs['model'], final_step)
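Counter is a project-specific step counter whose definition is not shown; only the constructor arguments and the cur_step attribute appear above, so everything else in this sketch is an assumption:

class Counter:
    # Hypothetical sketch of the global step counter used in these
    # training scripts; next() and should_stop() are assumed names.
    def __init__(self, total_step, test_step, log_step):
        self.total_step = total_step
        self.test_step = test_step
        self.log_step = log_step
        self.cur_step = 0

    def next(self):
        self.cur_step += 1
        return self.cur_step

    def should_stop(self):
        return self.cur_step >= self.total_step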
Example 3
def evaluate(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir, pathes=['eva_data', 'eva_log'])
    init_log(dirs['eva_log'])
    # enforce the same evaluation seeds across agents
    seeds = args.evaluate_seeds
    logging.info('Evaluation: random seeds: %s' % seeds)
    if not seeds:
        seeds = []
    else:
        seeds = [int(s) for s in seeds.split(',')]
    evaluate_fn(base_dir, dirs['eva_data'], seeds, 1)
Example 4
def main():
    init_log(LOGFILE)
    try:
        options = parse_options()
    except Usage as err:
        msg, no_error = err.args
        if no_error:
            out = sys.stdout
            ret = 0
        else:
            out = sys.stderr
            ret = 2
        if msg:
            print(msg, file=out)
        return ret
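The snippet assumes a Usage exception raised by parse_options() whose args tuple is (msg, no_error). A plausible definition, purely for illustration:

class Usage(Exception):
    # Hypothetical: carries (message, no_error) so main() can choose
    # between stdout with exit code 0 and stderr with exit code 2.
    def __init__(self, msg, no_error=False):
        super().__init__(msg, no_error)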
Example 5
def main():
    args = get_args()
    config = load_config('forwarder', str(args.config))
    init_log('hg-agent-forwarder', args.debug)
    shutdown = create_shutdown_event()
    logging.info("Metric forwarder starting.")

    metric_forwarder = MetricForwarder(config, shutdown)
    metric_forwarder.start()
    while not shutdown.is_set():
        time.sleep(5)

    logging.info("Metric forwarder shutting down")
    metric_forwarder.shutdown()
    logging.info("Metric forwarder finished.")
Example 6
def train(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir)
    init_log(dirs['log'])
    config_dir = args.config_dir
    copy_file(config_dir, dirs['data'])
    config = configparser.ConfigParser()
    config.read(config_dir)
    in_test, post_test = init_test_flag(args.test_mode)

    # init env
    env = init_env(config['ENV_CONFIG'])
    logging.info('Training: s dim: %d, a dim %d, s dim ls: %r, a dim ls: %r' %
                 (env.n_s, env.n_a, env.n_s_ls, env.n_a_ls))

    # init step counter
    total_step = int(config.getfloat('TRAIN_CONFIG', 'total_step'))
    test_step = int(config.getfloat('TRAIN_CONFIG', 'test_interval'))
    log_step = int(config.getfloat('TRAIN_CONFIG', 'log_interval'))
    global_counter = Counter(total_step, test_step, log_step)

    # init centralized or multi agent
    seed = config.getint('ENV_CONFIG', 'seed')
    if env.agent == 'iddpg':
        model = IDDPG(env.n_s_ls, env.n_a_ls, env.n_w_ls, total_step,
                     config['MODEL_CONFIG'], seed=seed)
    elif env.agent == 'maddpg':  # TODO: Add MADDPG
        model = MADDPG(env.n_s_ls, env.n_a_ls, env.n_w_ls, env.n_f_ls, total_step,
                       config['MODEL_CONFIG'], seed=seed)
    else:
        raise ValueError('unknown agent type: %s' % env.agent)

    summary_writer = tf.summary.FileWriter(dirs['log'])
    trainer = Trainer(env, model, global_counter, summary_writer, in_test, output_path=dirs['data'])
    trainer.run()
    # if post_test:
    #     tester = Tester(env, model, global_counter, summary_writer, dirs['data'])
    #     tester.run_offline(dirs['data'])

    # save model
    final_step = global_counter.cur_step
    logging.info('Training: save final model at step %d ...' % final_step)
    model.save(dirs['model'], final_step)
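init_test_flag is not shown here or in Examples 13 and 16, which call it the same way; its call sites suggest it maps the test-mode string to the pair (in_test, post_test). A guess at its shape, with the mode names invented:

def init_test_flag(test_mode):
    # Hypothetical sketch; the mode strings below are assumptions.
    if test_mode == 'no_test':
        return False, False
    if test_mode == 'in_train_test':
        return True, False
    if test_mode == 'after_train_test':
        return False, True
    return True, True  # test both during and after training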
Example 7
def evaluate(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir, pathes=['eva_data', 'eva_log'])
    init_log(dirs['eva_log'])
    agents = args.agents.split(',')
    # enforce the same evaluation seeds across agents
    seeds = args.evaluate_seeds
    logging.info('Evaluation: random seeds: %s' % seeds)
    if not seeds:
        seeds = []
    else:
        seeds = [int(s) for s in seeds.split(',')]
    threads = []
    for i, agent in enumerate(agents):
        agent_dir = base_dir + '/' + agent
        thread = threading.Thread(target=evaluate_fn,
                                  args=(agent_dir, dirs['eva_data'], seeds, i))
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
Example 8
def load_settings(env=None):
    global settings_dict, curr_env
    mass_redis_settings = EnvSettings()

    mass_redis_settings.cpg = ConfigParser.SafeConfigParser()
    if env is None or env == EnvironmentType.LOCAL:
        mass_redis_settings.cpg.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), r'config.txt'))
    else:
        mass_redis_settings.cpg.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), r'config_%s.txt' % env))

    mass_redis_settings.ENV = mass_redis_settings.cpg.get('general', 'env')

    # Set only once so that this keeps a reference to the environment the app is running in.
    # Don't update it on subsequent loads, since a user may request a provider in a different environment.
    if curr_env is None:
        curr_env = mass_redis_settings.ENV

    mass_redis_settings.LOG_ROOT = mass_redis_settings.cpg.get('general', 'log_root')
    mass_redis_settings.LOG_NAME = 'generic_redis_cache.log'
    mass_redis_settings.LOG_LEVEL = mass_redis_settings.cpg.get('general', 'log_level')

    utils.init_log(mass_redis_settings.LOG_NAME, mass_redis_settings.LOG_ROOT)
    level = logging.getLevelName(mass_redis_settings.LOG_LEVEL)
    utils.set_log_level(level, mass_redis_settings.LOG_NAME)

    logger = logging.getLogger(mass_redis_settings.LOG_NAME)

    # Redis Settings
    mass_redis_settings.REDIS_HOST = mass_redis_settings.cpg.get('general', 'redis_server')
    mass_redis_settings.REDIS_PORT = mass_redis_settings.cpg.get('general', 'redis_port')
    mass_redis_settings.REDIS_PASSWORD = mass_redis_settings.cpg.get('general', 'redis_password')
    mass_redis_settings.REDIS_DB = mass_redis_settings.cpg.get('general', 'redis_db')

    # For debugging purposes, log settings data
    logger.info('log_root: %s, log_name:%s, log_level:%s' % (mass_redis_settings.LOG_ROOT, mass_redis_settings.LOG_NAME,
                                                             mass_redis_settings.LOG_LEVEL))

    settings_dict[mass_redis_settings.ENV] = mass_redis_settings
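This example treats utils.init_log as taking a log name plus a root directory, and utils.set_log_level as adjusting the same named logger afterwards. A minimal sketch under those assumptions:

import logging
import os


def init_log(log_name, log_root):
    # Hypothetical sketch: attach a file handler to a named logger.
    logger = logging.getLogger(log_name)
    handler = logging.FileHandler(os.path.join(log_root, log_name))
    handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    return logger


def set_log_level(level, log_name):
    # Hypothetical sketch: set the level on the same named logger.
    logging.getLogger(log_name).setLevel(level)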
Example 9
def main():
    """
    Execution
    """

    startTime = time.time()

    #pidfile = make_pidlockfile('/tmp/test-daemon.pid', 0)

    #if is_pidfile_stale(pidfile):
            #pidfile.break_lock()

    #if pidfile.is_locked():
            #print 'daemon is running with PID %s already' % pidfile.read_pid()
            #sys.exit(0)

    #with DaemonContext(pidfile=pidfile):

    #define counters
    fix.count = 0
    apis_check.count = 0

    #initialize syslog configuration
    sys_logger = init_log()

    #crawl web hosts and log/email failures if any
    web_monitor(sys_logger)

    #execute(gfs_check)
    #execute(meminfo)
    #execute(swapinfo)

    #crawl API hosts and log/email failures if any
    execute(apis_check, sys_logger)

    #disconnect from all servers
    disconnect_all()
    #time.sleep(1)

    endTime = time.time()
    elapsedTime = endTime - startTime

    print(elapsedTime)
Example 10
    def set_user_options(self):
        self.log = init_log()
        self.log.info('=' * 50)
        self.log.info("Start Program...")

        self.log.info("...")
        self.usage = "usage: %prog [options] arg1 arg2"
        self.parser = optparse.OptionParser(usage=self.usage)

        self.parser.add_option("-f",
                               "--files",
                               action="store_true",
                               default=False,
                               help="read data from FILE",
                               metavar="FILE")
        self.parser.add_option("-r",
                               "--regexp",
                               action="store_true",
                               default=False,
                               help="regexp pattern to match in files")
        self.parser.add_option("-u",
                               "--underscore",
                               action="store_true",
                               default=False,
                               help="print lines with underscore symbol")
        self.parser.add_option("-c",
                               "--color",
                               action="store_true",
                               default=False,
                               help="print colored lines")
        self.parser.add_option("-m",
                               "--machine",
                               action="store_true",
                               default=False,
                               help="print machine format lines")
Example 11
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import pika
import signal
import sys
sys.path.append('..')
import utils

mylog = utils.init_log()
channel, connection = utils.create_channel()


def fib(n):
    if n < 1:
        return []
    elif n == 1:
        return [
            1,
        ]
    elif n == 2:
        return [
            1,
            1,
        ]
    else:
        result = [1, 1]
        while len(result) < n:
            result.append(result[-1] + result[-2])
        return result

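For example, fib(6) returns [1, 1, 2, 3, 5, 8], and fib(0) returns [].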
Example 12
def train(args):
    # gpu init
    multi_gpus = False
    if len(args.gpus.split(',')) > 1:
        multi_gpus = True
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # log init
    save_dir = os.path.join(args.save_dir, datetime.now().date().strftime('%Y%m%d'))
    if not os.path.exists(save_dir):
        #raise NameError('model dir exists!')
        os.makedirs(save_dir)
    logging = init_log(save_dir)
    _print = logging.info
    # summary(net.to(config.device), (3,112,112))
    # define transform
    transform = transforms.Compose([
        transforms.Resize((112, 112)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
    ])
    net = EfficientNet.from_name('efficientnet-b0', num_classes=2)

    # training dataset
    trainset = ANTI(train_root="/mnt/sda3/data/FASD", file_list = "train.txt", transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size = 2,
                                             shuffle=True, num_workers=8, drop_last=False)

    # define optimizers for different layer
    criterion = torch.nn.CrossEntropyLoss().to(device)
    optimizer_ft = optim.SGD([
        {'params': net.parameters(), 'weight_decay': 5e-4},
    ], lr=0.001, momentum=0.9, nesterov=True)

    exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer_ft, milestones= [6, 10, 30], gamma=0.1)
    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)

    total_iters = 1
    vis = Visualizer(env= "effiction")

    for epoch in range(1, args.total_epoch + 1):
        exp_lr_scheduler.step()
        _print('Train Epoch: {}/{} ...'.format(epoch, args.total_epoch))
        net.train()
        since = time.time()
        for data in trainloader:
            img, label = data[0].to(device), data[1].to(device)
            optimizer_ft.zero_grad()
            raw_logits = net(img)
            total_loss = criterion(raw_logits, label)
            total_loss.backward()
            optimizer_ft.step()
            # print train information
            if total_iters % 200 == 0:
                # current training accuracy
                _, predict = torch.max(raw_logits.data, 1)
                total = label.size(0)
                correct = (predict.cpu().numpy() == label.data.cpu().numpy()).sum()
                time_cur = (time.time() - since) / 200  # 200 iterations per report
                since = time.time()
                vis.plot_curves({'softmax loss': total_loss.item()}, iters=total_iters, title='train loss',
                                xlabel='iters', ylabel='train loss')
                vis.plot_curves({'train accuracy': correct / total}, iters=total_iters, title='train accuracy', xlabel='iters',
                                ylabel='train accuracy')

                print("Iters: {:0>6d}/[{:0>2d}], loss: {:.4f}, train_accuracy: {:.4f}, time: {:.2f} s/iter, learning rate: {}".format(total_iters, epoch, total_loss.item(), correct/total, time_cur, exp_lr_scheduler.get_lr()[0]))

            # save model
            if total_iters % args.save_freq == 0:
                msg = 'Saving checkpoint: {}'.format(total_iters)
                _print(msg)
                if multi_gpus:
                    net_state_dict = net.module.state_dict()
                else:
                    net_state_dict = net.state_dict()
                   
                if not os.path.exists(save_dir):
                    os.mkdir(save_dir)

                torch.save({
                    'iters': total_iters,
                    'net_state_dict': net_state_dict},
                    os.path.join(save_dir, 'Iter_%06d_net.ckpt' % total_iters))

            # test accuracy
            if total_iters % args.test_freq == 0 and args.has_test:
                # test model on lfw; lfw_accs is assumed to come from an LFW
                # evaluation helper that this snippet does not show
                net.eval()
                _print('LFW Ave Accuracy: {:.4f}'.format(np.mean(lfw_accs) * 100))

                net.train()
            total_iters += 1
    print('finished training')
Example 13
def train(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir)
    init_log(dirs['log'])
    config_dir = args.config_dir
    copy_file(config_dir, dirs['data'])
    config = configparser.ConfigParser()
    config.read(config_dir)
    in_test, post_test = init_test_flag(args.test_mode)

    # init env
    env = init_env(config['ENV_CONFIG'])
    logging.info('Training: s dim: %d, a dim %d, s dim ls: %r, a dim ls: %r' %
                 (env.n_s, env.n_a, env.n_s_ls, env.n_a_ls))

    # init step counter
    total_step = int(config.getfloat('TRAIN_CONFIG', 'total_step'))  #1e6
    test_step = int(config.getfloat('TRAIN_CONFIG', 'test_interval'))  #2e4
    log_step = int(config.getfloat('TRAIN_CONFIG', 'log_interval'))  #1e4
    global_counter = Counter(total_step, test_step, log_step)

    # init centralized or multi agent
    seed = config.getint('ENV_CONFIG', 'seed')  #12
    # coord = tf.train.Coordinator()

    if env.agent == 'ia2c':
        model = IA2C(env.n_s_ls,
                     env.n_a_ls,
                     env.n_w_ls,
                     total_step,
                     config['MODEL_CONFIG'],
                     seed=seed)
    elif env.agent == 'ma2c':
        model = MA2C(env.n_s_ls,
                     env.n_a_ls,
                     env.n_w_ls,
                     env.n_f_ls,
                     total_step,
                     config['MODEL_CONFIG'],
                     seed=seed)
    elif env.agent == 'codql':
        print('This is codql')
        num_agents = len(env.n_s_ls)
        print('num_agents:', num_agents)
        a_dim = env.n_a_ls[0]  # per-agent action-space size (agents assumed homogeneous)
        print('a_dim:', a_dim)
        s_dim = env.n_s_ls[0]
        print('env.n_s_ls=', s_dim)
        s_dim_wait = env.n_w_ls[0]
        print('s_dim_wait:', s_dim_wait)
        model = MFQ(nb_agent=num_agents,
                    a_dim=a_dim,
                    s_dim=s_dim,
                    s_dim_wave=s_dim - s_dim_wait,
                    s_dim_wait=s_dim_wait,
                    config=config['MODEL_CONFIG'])
    elif env.agent == 'dqn':
        model = DQN(nb_agent=len(env.n_s_ls),
                    a_dim=env.n_a_ls[0],
                    s_dim=env.n_s_ls[0],
                    s_dim_wave=env.n_s_ls[0] - env.n_w_ls[0],
                    s_dim_wait=env.n_w_ls[0],
                    config=config['MODEL_CONFIG'],
                    doubleQ=False)  #doubleQ=False denotes dqn else ddqn
    elif env.agent == 'ddpg':
        model = DDPGEN(nb_agent=len(env.n_s_ls),
                       share_params=True,
                       a_dim=env.n_a_ls[0],
                       s_dim=env.n_s_ls[0],
                       s_dim_wave=env.n_s_ls[0] - env.n_w_ls[0],
                       s_dim_wait=env.n_w_ls[0])
    elif env.agent == 'iqld':
        model = IQL(env.n_s_ls,
                    env.n_a_ls,
                    env.n_w_ls,
                    total_step,
                    config['MODEL_CONFIG'],
                    seed=0,
                    model_type='dqn')
    else:
        model = IQL(env.n_s_ls,
                    env.n_a_ls,
                    env.n_w_ls,
                    total_step,
                    config['MODEL_CONFIG'],
                    seed=0,
                    model_type='lr')

    summary_writer = tf.summary.FileWriter(dirs['log'])
    trainer = Trainer(env,
                      model,
                      global_counter,
                      summary_writer,
                      in_test,
                      output_path=dirs['data'])
    trainer.run()

    # save model
    final_step = global_counter.cur_step
    logging.info('Training: save final model at step %d ...' % final_step)
    model.save(dirs['model'], final_step)
Example 14
def get_cards():
    num = 0
    cards = os.environ.get('CUDA_VISIBLE_DEVICES', '')
    if cards != '':
        num = len(cards.split(","))
    return num


if __name__ == "__main__":
    import paddle
    paddle.enable_static()

    args = ArgConfig()
    args = args.build_conf()

    utils.print_arguments(args)
    check_cuda(args.use_cuda)
    check_version()
    utils.init_log("./log/TextSimilarityNet")
    conf_dict = config.SimNetConfig(args)
    if args.do_train:
        train(conf_dict, args)
    elif args.do_test:
        test(conf_dict, args)
    elif args.do_infer:
        infer(conf_dict, args)
    else:
        raise ValueError(
            "one of do_train, do_test, or do_infer must be True")
Example 15
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append('..')
import utils

myLog = utils.init_log()


def recv_info(ch, method, properties, body):
    myLog.info(body)


if __name__ == '__main__':
    channel, connection = utils.create_channel()

    channel.exchange_declare('logs_topic', 'topic')
    # Declare a random queue (result); it is deleted when the consumer disconnects
    result = channel.queue_declare(exclusive=True)
    queue_name = result.method.queue

    # Binding keys come from user input
    routing_keys = sys.argv[1:] if len(sys.argv) > 1 else [
        'anonymous.info',
    ]
    for item in routing_keys:
        myLog.info(item)
        channel.queue_bind(exchange='logs_topic',
                           queue=queue_name,
                           routing_key=item)
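Examples 11, 15, and 30 all import a create_channel() helper from utils that is never shown. A minimal sketch, assuming a local RabbitMQ broker and pika's blocking adapter:

import pika


def create_channel(host='localhost'):
    # Hypothetical sketch of the shared helper: open a blocking
    # connection to RabbitMQ and return (channel, connection),
    # matching the unpacking order used in the examples.
    connection = pika.BlockingConnection(pika.ConnectionParameters(host))
    channel = connection.channel()
    return channel, connection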
Example 16
def train(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir)
    init_log(dirs['log'])
    config_dir = args.config_dir
    copy_file(config_dir, dirs['data'])
    config = configparser.ConfigParser()
    config.read(config_dir)
    in_test, post_test = init_test_flag(args.test_mode)

    # init env
    env = init_env(config['ENV_CONFIG'])
    logging.info('Training: s dim: %d, a dim %d, s dim ls: %r, a dim ls: %r' %
                 (env.n_s, env.n_a, env.n_s_ls, env.n_a_ls))

    # init step counter
    total_step = int(config.getfloat('TRAIN_CONFIG', 'total_step'))
    test_step = int(config.getfloat('TRAIN_CONFIG', 'test_interval'))
    log_step = int(config.getfloat('TRAIN_CONFIG', 'log_interval'))
    global_counter = Counter(total_step, test_step, log_step)

    # init centralized or multi agent
    seed = config.getint('ENV_CONFIG', 'seed')
    # coord = tf.train.Coordinator()

    # if env.agent == 'a2c':
    #     model = A2C(env.n_s, env.n_a, total_step,
    #                 config['MODEL_CONFIG'], seed=seed)
    if env.agent == 'ia2c':
        model = IA2C(env.n_s_ls,
                     env.n_a_ls,
                     env.n_w_ls,
                     total_step,
                     config['MODEL_CONFIG'],
                     seed=seed)
    elif env.agent == 'ma2c':
        model = MA2C(env.n_s_ls,
                     env.n_a_ls,
                     env.n_w_ls,
                     env.n_f_ls,
                     total_step,
                     config['MODEL_CONFIG'],
                     seed=seed)
    elif env.agent == 'iqld':
        model = IQL(env.n_s_ls,
                    env.n_a_ls,
                    env.n_w_ls,
                    total_step,
                    config['MODEL_CONFIG'],
                    seed=0,
                    model_type='dqn')
    else:
        model = IQL(env.n_s_ls,
                    env.n_a_ls,
                    env.n_w_ls,
                    total_step,
                    config['MODEL_CONFIG'],
                    seed=0,
                    model_type='lr')

    # disable multi-threading for safe SUMO implementation
    # threads = []
    summary_writer = tf.summary.FileWriter(dirs['log'])
    trainer = Trainer(env,
                      model,
                      global_counter,
                      summary_writer,
                      in_test,
                      output_path=dirs['data'])
    trainer.run()
    # if in_test or post_test:
    #     # assign a different port for test env
    #     test_env = init_env(config['ENV_CONFIG'], port=1)
    #     tester = Tester(test_env, model, global_counter, summary_writer, dirs['data'])

    # def train_fn():
    #     trainer.run(coord)

    # thread = threading.Thread(target=train_fn)
    # thread.start()
    # threads.append(thread)
    # if in_test:
    #     def test_fn():
    #         tester.run_online(coord)
    #     thread = threading.Thread(target=test_fn)
    #     thread.start()
    #     threads.append(thread)
    # coord.join(threads)

    # post-training test
    if post_test:
        tester = Tester(env, model, global_counter, summary_writer,
                        dirs['data'])
        tester.run_offline(dirs['data'])

    # save model
    final_step = global_counter.cur_step
    logging.info('Training: save final model at step %d ...' % final_step)
    model.save(dirs['model'], final_step)
Example 17
        except Exception as e:
            logging.exception("GooglePlaySearchAPI:net_get_nbp:Exception:%s" % e)
        finally:
            return text


# --------- test ---------


if __name__ == "__main__":
    import json
    import utils
    from GooglePlayDetailParser import GooglePlayDetailParser

    p = GooglePlayDetailParser()
    utils.init_log("main.log", True)

    proxies = {
        "https": "http://127.0.0.1:8118",
        "http": "http://127.0.0.1:8118",
    }
    api = GooglePlaySearchAPI(proxies=proxies)
    data = api.search("locker", "en", 'US')
    # html = api.net_get("Погода", "ru-RU")
    # print html
    # p.feed(html)
    # data = p.get_data()
    # print len(data)
    with open("search.json", 'w') as f:
        f.write(json.dumps(data, indent=4, sort_keys=True, ensure_ascii=False).encode('utf-8'))
Example 18
import hug
from db import InitDB
from utils import init_log
from falcon.request import Request
from falcon.response import Response
from models import ModelError, get_model
from serializers import LinksSerializer, TimeSerializer

""" Инстанс базы данных инициализируется исходя из настроек сервера. 
Инстанс лога должен сосдаваться один раз, поэтому глобальный """

db_engine = InitDB()
error_log = init_log("error")


@hug.request_middleware()
def process_data(request: Request, response: Response):
    """ 'from' is a build-in python name so we have to rename it before using.

    """
    if "from" in request.params:
        request.params["_from"] = request.params.pop("from")


@hug.get('/visited_domains')
def visited_domains(_from: TimeSerializer()=None, to: TimeSerializer()=None) -> dict:
    """ Visited_domains GET request controller

    :param _from: start of period
    :param to: end of period
    :return: list of visited domains during period
Example 19
# -*- coding: utf-8 -*-
import codecs, os, logging
import requests
from bs4 import BeautifulSoup
import json
import sys
from utils import load_owner_list, init_log

########################### LOGGING ############################################
g_working_dir = os.path.dirname(os.path.realpath(__file__))
g_log_file = "{}/log/{}.log".format(g_working_dir, os.path.basename(__file__))
logger = init_log(g_log_file, __name__)
################################################################################

# For demo.oracle.com

with codecs.open('./conf.json', 'r', encoding='utf8') as conf_list_file:
    config = json.load(conf_list_file)
    session_id = config["demo.oracle.com"]["session_id"]
    cookie = config["demo.oracle.com"]["cookie"]

result_pages_folder = 'result_pages/demo'
my_asset_list = "current asset list: "

headers = {
    'Accept':
    'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding':
Example 20
def main():
    init_log()
    AutoBuilder().execute()
Example 21
	if valid > 0:
		log('VALID: ' + str(valid))
	else: 
		log('NOT VALID !!')
	log('execution time: ' + str(datetime.now() - startTime))
	log('')

	json_data = {"Success": 1 if valid > 0 else 0, "ErrorReason": reason}	
	print (json.dumps(json_data, ensure_ascii=True, indent=None, sort_keys=True))

	return 1 if valid > 0 else 0


# Main
#	Reads the ./test/ directory and validates the images in it
if __name__ == '__main__':

	img_path = sys.argv[1]
	country = sys.argv[2].lower()
	side = sys.argv[3].lower()

	log_path = './logs/log_' + country.upper() + '_' + side + '.txt'
	init_log(log_path)

	try: 
		validate(img_path, country, side)
	except Exception as e:
		json_data = {"Success": 0, "ErrorReason": 'execution error'}	
		print (json.dumps(json_data, ensure_ascii=True, indent=None, sort_keys=True))
		log('   ERROR: {0}'.format(e))
		log('')
Example 22
# Import the Earth Engine Python Package
import ee
import pandas as pd
import numpy as np
import time
import configparser
from utils import init_log
from config import *

import os

# Start logging
logging = init_log(".")
_print = logging.info

# Reading data
data = pd.read_csv(input_file,
                   sep='\t',
                   usecols=[column_latitude, column_longitude])

data = data.values

# To switch accounts:
# earthengine authenticate

# Initialize the Earth Engine object, using the authentication credentials.
ee.Initialize()


def degree_conv(var):
    data = var.split("°", 1)
Example 23
def main():
    args = configparser.ConfigParser()
    args.read('argsConfig.ini')


    if args.getboolean('Log', 'flush_history') == 1:
        objects = os.listdir(args.get('Log', 'log_path'))
        for f in objects:
            if os.path.isdir(args.get('Log', 'log_path') + f):
                shutil.rmtree(args.get('Log', 'log_path') + f)

    if args.getboolean('Log', 'delete_model_name_dir'):
        objects = os.listdir(args.get('Log', 'output'))
        for f in objects:
            if f == args.get('Log', 'model_name'):
                shutil.rmtree(args.get('Log', 'output') + args.get('Log', 'model_name') + '/')

    now = datetime.now()
    logdir = args.get('Log', 'log_path') + now.strftime("%Y%m%d-%H%M%S") + "/"
    os.makedirs(logdir)
    log_file = logdir + 'log.txt'
    writer = SummaryWriter(logdir)

    texts, labels, number_of_classes, sample_weights = load_data(args, 'train')

    class_names = sorted(list(set(labels)))
    class_names = [str(class_name - 1) for class_name in class_names]

    train_texts, X_dev, train_labels, y_dev_labels, train_sample_weights, _ = train_test_split(texts,
                                                                                               labels,
                                                                                               sample_weights,
                                                                                               train_size=args.getfloat(
                                                                                                   'Train',
                                                                                                   'train_size'),
                                                                                               test_size=args.getfloat(
                                                                                                   'Train',
                                                                                                   'dev_size'),
                                                                                               random_state=42,
                                                                                               stratify=labels)

    training_set = MyDataset(train_texts, train_labels, args)
    validation_set = MyDataset(X_dev, y_dev_labels, args)

    training_params = {"batch_size": args.getint('Train', 'batch_size'),
                       "shuffle": True,
                       "num_workers": args.getint('Train', 'workers'),
                       "drop_last": True}

    validation_params = {"batch_size": args.getint('Train', 'batch_size'),
                         "shuffle": False,
                         "num_workers": args.getint('Train', 'workers'),
                         "drop_last": True}

    if args.getboolean('Train', 'use_sampler'):
        train_sample_weights = torch.from_numpy(train_sample_weights)
        sampler = WeightedRandomSampler(train_sample_weights.type(
            'torch.DoubleTensor'), len(train_sample_weights))
        training_params['sampler'] = sampler
        training_params['shuffle'] = False

    training_generator = DataLoader(training_set, **training_params)
    validation_generator = DataLoader(validation_set, **validation_params)

    model = CharacterLevelCNN(number_of_classes, args)

    if args.getboolean('Model', 'visualize_model_graph'):
        x = torch.zeros((args.getint('Train', 'batch_size'),
                       args.getint('DataSet', 'char_num'),
                       args.getint('DataSet', 'l0')))
        out = model(x)
        make_dot(out).render("CharacterLevelCNN", format="png", quiet_view=True)

    if torch.cuda.is_available():
        model.cuda()

    # todo: check other loss functions for binary and multi-label problems
    if args.get('Train', 'criterion') == 'nllloss':
        criterion = nn.NLLLoss()

    # criterion = nn.BCELoss()

    # optimization scheme
    if args.get('Train', 'optimizer') == 'Adam':
        optimizer = optim.Adam(model.parameters(), lr=args.getfloat('Train', 'lr'))
    elif args.get('Train', 'optimizer') == 'SGD':
        if args.get('Train', 'scheduler') == 'clr':
            optimizer = torch.optim.SGD(
                model.parameters(), lr=1, momentum=0.9, weight_decay=0.00001
            )
        else:
            optimizer = optim.SGD(model.parameters(), lr=args.getfloat('Train', 'lr'), momentum=0.9)
    elif args.get('Train', 'optimizer') == 'ASGD':
        optimizer = optim.ASGD(model.parameters(), lr=args.getfloat('Train', 'lr'))

    if os.path.isfile(args.get('Log', 'continue_from_model_checkpoint')):
        print("=> loading checkpoint from '{}'".format(args.get('Log', 'continue_from_model_checkpoint')))
        checkpoint = torch.load(args.get('Log', 'continue_from_model_checkpoint'))
        start_epoch = checkpoint['epoch']
        start_iter = checkpoint.get('iter', None)
        best_f1 = checkpoint.get('best_f1', None)
        if start_iter is None:
            start_epoch += 1  # Assume that we saved a model after an epoch finished, so start at the next epoch.
            start_iter = 0
        else:
            start_iter += 1
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        best_epoch = start_epoch  # track the best-F1 epoch when resuming
    else:
        start_iter = 0
        start_epoch = 0
        best_f1 = 0
        best_epoch = 0

    if args.get('Train', 'scheduler') == 'clr':
        stepsize = int(args.getint('Train', 'clr_step_size') * len(training_generator))
        clr = utils.cyclical_lr(stepsize, args.getfloat('Train', 'clr_min_lr'), args.getfloat('Train', 'clr_max_lr'))
        scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, [clr])
    else:
        scheduler = None
        lr_half_cnt = 0

    utils.init_log(log_file=log_file, args=args, labels=class_names)
    try:
        for epoch in range(start_epoch, args.getint('Train', 'epochs')):

            training_loss, training_accuracy, train_f1 = train(model,
                                                               training_generator,
                                                               optimizer,
                                                               criterion,
                                                               epoch,
                                                               start_iter,
                                                               writer,
                                                               log_file,
                                                               scheduler,
                                                               class_names,
                                                               args,
                                                               args.getint('Log', 'print_out_every'))

            validation_loss, validation_accuracy, validation_f1 = evaluate(model,
                                                                           validation_generator,
                                                                           criterion,
                                                                           epoch,
                                                                           writer,
                                                                           log_file)


            print('\n[Epoch: {} / {}]\ttrain_loss: {:.4f} \ttrain_acc: {:.4f} \tval_loss: {:.4f} \tval_acc: {:.4f}'.
                  format(epoch + 1, args.getint('Train', 'epochs'), training_loss, training_accuracy, validation_loss,
                         validation_accuracy))
            print("=" * 50)

            with open(log_file, 'a') as f:
                f.write('[Epoch: {} / {}]\ttrain_loss: {:.4f} \ttrain_acc: {:.4f} \tval_loss: {:.4f} \tval_acc: {:.4f}\n'.
                  format(epoch + 1, args.getint('Train', 'epochs'), training_loss, training_accuracy, validation_loss,
                         validation_accuracy))
                f.write('=' * 50)

            # learning rate scheduling
            if args.get('Train', 'scheduler') == 'step':
                if args.get('Train', 'optimizer') == 'SGD' and ((epoch + 1) % 3 == 0) and lr_half_cnt < 10:
                    current_lr = optimizer.state_dict()['param_groups'][0]['lr']
                    current_lr /= 2
                    lr_half_cnt += 1
                    print('Decreasing learning rate to {0}'.format(current_lr))
                    with open(log_file, 'a') as f:
                        f.write('Decreasing learning rate to {0}\n'.format(current_lr))
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = current_lr

            if args.getboolean('Log', 'checkpoint'):

                state = {'epoch': epoch, 'optimizer': optimizer.state_dict(), 'best_f1': best_f1}

                if args.getint('Log', 'save_interval') > 0 and epoch % args.getint('Log', 'save_interval') == 0:
                    save_checkpoint(model, state, optimizer, args, epoch, validation_loss, validation_accuracy,
                                    validation_f1)

                if validation_f1 > best_f1:
                    best_f1 = validation_f1
                    best_epoch = epoch
                    save_checkpoint(model, state, optimizer, args, epoch, validation_loss, validation_accuracy,
                                    validation_f1)

            if args.getboolean('Train', 'early_stopping'):
                if epoch - best_epoch > args.getint('Train', 'patience') > 0:
                    print("Early-stopping: Stop training at epoch {}. The lowest loss achieved is {} at epoch {}".format(
                        epoch, validation_loss, best_epoch))
                    break
    except KeyboardInterrupt:
        print('Exit Keyboard interrupt\n')
        save_checkpoint(model, state, optimizer, args, epoch, validation_loss, validation_accuracy, validation_f1)
Example 24
from utils import init_log

init_log()
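Across these examples, init_log is called with anywhere from zero arguments to a name, a directory, or a debug flag, and its return value is variously a logger, the logging module, or ignored. A minimal zero-argument variant compatible with this example, assuming plain stream logging is acceptable:

import logging


def init_log(level=logging.INFO):
    # Hypothetical sketch: configure root logging once and hand back a logger.
    logging.basicConfig(
        level=level,
        format='%(asctime)s %(name)s %(levelname)s %(message)s')
    return logging.getLogger(__name__)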
Example 25
def main():
    utils.init_log()
    bot.polling(none_stop=True)
Example 26
        gpu_list = str(GPU)
    else:
        multi_gpus = True
        for i, gpu_id in enumerate(GPU):
            gpu_list += str(gpu_id)
            if i != len(GPU) - 1:
                gpu_list += ','
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list

    # other init
    start_epoch = 1
    save_dir = os.path.join(SAVE_DIR, MODEL_PRE + 'v2_' + datetime.now().strftime('%Y%m%d_%H%M%S'))
    if os.path.exists(save_dir):
        raise NameError('model dir exists!')
    os.makedirs(save_dir)
    logging = init_log(save_dir)
    _print = logging.info


    # define trainloader and testloader
    trainset = CASIA_Face(root=CASIA_DATA_DIR)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE,
                                            shuffle=True, num_workers=8, drop_last=False)

    # nl: left_image_path
    # nr: right_image_path
    nl, nr, folds, flags = parseList(root=LFW_DATA_DIR)
    testdataset = LFW(nl, nr)
    testloader = torch.utils.data.DataLoader(testdataset, batch_size=32,
                                            shuffle=False, num_workers=8, drop_last=False)
Example 27
def train_net(args):
    torch.manual_seed(7)
    np.random.seed(7)
    checkpoint = args.checkpoint
    start_epoch = 0
    best_acc = float('-inf')
    import socket
    exp_name = datetime.now().strftime('%Y%m%d_%H%M%S')
    writer = SummaryWriter(
        os.path.join('runs_mobileNet', exp_name + '_' + socket.gethostname()))
    epochs_since_improvement = 0

    # Initialize / load checkpoint
    if checkpoint is None:
        # model = MobileFaceNet()
        # metric_fc = ArcMarginModel(args)
        model = MobileFaceNet_PRELU()
        # model = MobileFaceNet_PRELU_AirFace()
        # metric_fc = ArcMarginProduct()
        metric_fc = MV_Softmax()

        # For mobilenet RELU
        # optimizer = torch.optim.SGD([{'params': model.conv1.parameters()},
        #                              {'params': model.dw_conv.parameters()},
        #                              {'params': model.features.parameters()},
        #                              {'params': model.conv2.parameters()},
        #                              {'params': model.gdconv.parameters()},
        #                              {'params': model.conv3.parameters(), 'weight_decay': 4e-4},
        #                              {'params': model.bn.parameters()},
        #                              {'params': metric_fc.parameters()}],
        #                             lr=args.lr, momentum=args.mom, weight_decay=args.weight_decay, nesterov=True)

        # FOR mobileNet PRELU
        optimizer = torch.optim.SGD([{
            'params': model.parameters(),
            'weight_decay': 5e-4
        }, {
            'params': metric_fc.parameters(),
            'weight_decay': 5e-4
        }],
                                    lr=0.04,
                                    momentum=0.9,
                                    nesterov=True)

        model = nn.DataParallel(model)
        metric_fc = nn.DataParallel(metric_fc)

    else:
        print("Training from pretrained model!: " + str(checkpoint))
        checkpoint = torch.load(checkpoint)
        # start_epoch = checkpoint['epoch'] + 1
        start_epoch = 32
        epochs_since_improvement = checkpoint['epochs_since_improvement']
        model = checkpoint['model']

        metric_fc = checkpoint['metric_fc']
        # Enable to use a new classification layer
        # metric_fc = ArcMarginProduct()
        # metric_fc = nn.DataParallel(metric_fc)

        # Enable to use learning rate from pre-trained
        # optimizer = checkpoint['optimizer']
        # Enable to use new learning rate
        # optimizer = torch.optim.SGD([
        #     {'params': model.parameters(), 'weight_decay': 5e-4},
        #     {'params': metric_fc.parameters(), 'weight_decay': 5e-4}
        # ], lr=0.0004, momentum=0.9, nesterov=True)
        optimizer = torch.optim.Adam([{
            'params': model.parameters(),
            'weight_decay': 5e-4
        }, {
            'params': metric_fc.parameters(),
            'weight_decay': 5e-4
        }],
                                     lr=0.001)

    # log init
    save_dir = os.path.join('logs', 'train' + '_' + exp_name)
    if os.path.exists(save_dir):
        raise NameError('model dir exists!')
    os.makedirs(save_dir)
    logger = init_log(save_dir)
    # logger = get_logger()

    # Move to GPU, if available
    model = model.to(device)
    metric_fc = metric_fc.to(device)

    # Loss function
    if args.focal_loss:
        criterion = FocalLoss(gamma=args.gamma).to(device)
    else:
        criterion = nn.CrossEntropyLoss().to(device)

    if args.triplet:
        metric_fc = AngleLinear()
        criterion = AngleLoss()

    # Custom dataloaders
    dataset = ArcFaceDataset('train')
    print(dataset.__len__())
    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    validation_split = 0.02
    split = int(np.floor(validation_split * dataset_size))
    shuffle_dataset = True
    if shuffle_dataset:
        np.random.seed(42)
        np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]
    # Creating PT data samplers and loaders:
    train_sampler = SubsetRandomSampler(train_indices)
    valid_sampler = SubsetRandomSampler(val_indices)

    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=args.batch_size,
                                               num_workers=8,
                                               sampler=train_sampler,
                                               pin_memory=True)
    validation_loader = torch.utils.data.DataLoader(dataset,
                                                    batch_size=16,
                                                    num_workers=4,
                                                    sampler=valid_sampler,
                                                    pin_memory=True)

    # scheduler = MultiStepLR(optimizer, milestones=[10, 15, 20], gamma=0.1)  # 5, 10, 15, 2
    # for i in range(0, start_epoch):
    #     print('Learning rate={}'.format(optimizer.param_groups[0]['lr']))
    #     scheduler.step()
    # scheduler = ReduceLROnPlateau(optimizer, mode='max', verbose=True)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer, max_lr=0.01, steps_per_epoch=len(train_loader), epochs=10)

    # Epochs
    for epoch in range(start_epoch, args.end_epoch):
        print('\nLearning rate={}\n'.format(optimizer.param_groups[0]['lr']))
        # One epoch's training
        train_loss, train_acc = train(train_loader=train_loader,
                                      model=model,
                                      metric_fc=metric_fc,
                                      criterion=criterion,
                                      optimizer=optimizer,
                                      epoch=epoch,
                                      logger=logger,
                                      scheduler=scheduler)
        val_acc = validate(val_loader=validation_loader,
                           model=model,
                           metric_fc=metric_fc)
        lr = optimizer.param_groups[0]['lr']
        writer.add_scalar('model/train_loss', train_loss, epoch)
        writer.add_scalar('model/train_acc', train_acc, epoch)
        writer.add_scalar('model/learning_rate', lr, epoch)
        writer.add_scalar('model/val_acc', val_acc, epoch)
        # One epoch's validation
        lfw_acc, threshold = lfw_test(model)
        writer.add_scalar('model/lfw_acc', lfw_acc, epoch)
        writer.add_scalar('model/threshold', threshold, epoch)
        logger.info('LFW Ave Accuracy: {:.4f}\t'
                    'Threshold: {:.4f}'.format(
                        np.mean(lfw_acc) * 100, threshold))
        # Check if there was an improvement
        is_best = val_acc > best_acc
        best_acc = max(val_acc, best_acc)
        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" %
                  (epochs_since_improvement, ))
        else:
            epochs_since_improvement = 0

        model.train()
        # Save checkpoint
        save_checkpoint(epoch, epochs_since_improvement, model, metric_fc,
                        optimizer, best_acc, is_best, train_loss)
Example 28
from flask import Flask
from flask_cors import CORS
from flask_sock import Sock
import os
import uuid
import utils
import image_utils
import json
from PIL import Image
import traceback
import legolize
import palette
import base64
from io import BytesIO
import simple_websocket

logger = utils.init_log()
app = Flask(__name__)
CORS(app)
HOST = os.environ['HOST']
DEBUG = os.environ.get('DEBUG', 'False')
sock = Sock(app)

pal = palette.Palette()

logger.info(f"palette loaded of {len(pal.colors)} colors")


@app.route('/upload/<size>', methods=['POST'])
def upload(size):
    uid = str(uuid.uuid4())
    input_name = utils.input_name(uid)
Example 29
######################
#       Paths
######################
save_root = os.path.join(opt.checkpoint_dir, opt.tag)
log_root = os.path.join(opt.log_dir, opt.tag)

utils.try_make_dir(save_root)
utils.try_make_dir(log_root)

######################
#      DataLoaders
######################
train_dataloader = dl.train_dataloader
val_dataloader = dl.val_dataloader
# init log
logger = init_log(training=True)

######################
#     Init model
######################
Model = get_model(opt.model)
model = Model(opt)

# Multi-GPU is not supported yet
# if len(opt.gpu_ids):
#     model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
model = model.to(device=opt.device)

if opt.load:
    load_epoch = model.load(opt.load)
    start_epoch = load_epoch + 1 if opt.resume else 1
Example 30
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import pika
import sys
sys.path.append("..")
from utils import init_log, create_channel

if __name__ == '__main__':
    mylog = init_log()
    channel, connection = create_channel()

    # This time, use a fanout exchange instead of the default direct exchange
    # channel.exchange_declare(exchange='logs', type='fanout')
    channel.exchange_declare('logs', 'fanout')

    message = ' '.join(sys.argv[1:]) or 'INFO : Hello World'

    channel.basic_publish(exchange='logs', routing_key='', body=message)
    mylog.info(" [x] Sent Log : %r" % (message, ))

    connection.close()
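Run, for instance, as python emit_log.py WARNING: disk almost full (the script name is a guess); with no arguments it publishes the default 'INFO : Hello World' to every queue bound to the logs exchange.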
Example 31
def train(args):
    writer = SummaryWriter()

    # gpu init
    multi_gpus = False
    if len(args.gpus.split(',')) > 1:
        multi_gpus = True
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # log init
    save_dir = args.save_dir
    if os.path.exists(save_dir):
        if not args.resume:
            raise NameError('model dir exists!')
    else:
        os.makedirs(save_dir)
    logging = init_log(save_dir)
    _print = logging.info

    # dataset loader
    train_transform = transforms.Compose([
                transforms.RandomCrop(128),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
                transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
                ])
    # training dataset
    train_set = CASIAWebFace(args.train_data_info, transform = train_transform)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=2, drop_last=False)
    
    test_transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize(128),
        transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))  # range [0.0, 1.0] -> [-1.0,1.0]
    ])
    lfw_dataset = LFW('./data/lfw_funneled', './data/lfw_funneled/pairs.txt', transform = test_transform)
    lfw_dataloader = torch.utils.data.DataLoader(lfw_dataset, batch_size= 128, shuffle=False, num_workers=2, drop_last=False)
    
    net = LSCNN(num_classes= 10559, growth_rate = 48)
    
    if args.resume:
        print('resume the model parameters from: ', args.net_path)
        net.load_state_dict(torch.load(args.net_path)['net_state_dict'])

    # define optimizers for different layer
    criterion = torch.nn.CrossEntropyLoss().to(device)
    optimizer_ft = optim.SGD(net.parameters(), lr = 0.1, weight_decay = 1e-4, momentum = 0.9, nesterov = True)
    exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer_ft, milestones=[10, 20, 30], gamma=0.1)
    
    if multi_gpus:
        net = DataParallel(net).to(device)
    else:
        net = net.to(device)

    best_test_acc = 0.0
    best_test_iters = 0
    total_iters = 0

    for epoch in range(1, args.total_epoch + 1):
        exp_lr_scheduler.step()
        # train model
        _print('Train Epoch: {}/{} ...'.format(epoch, args.total_epoch))
        net.train()

        since = time.time()
        for data in train_loader:
            img, label = data[0].to(device), data[1].to(device)
            optimizer_ft.zero_grad()

            
            output = net(img)
            total_loss = criterion(output, label)
            total_loss.backward()
            optimizer_ft.step()

            total_iters += 1
            # print train information
            if total_iters % 100 == 0:
                # current training accuracy
                _, predict = torch.max(output.data, 1)
                total = label.size(0)
                correct = (np.array(predict.cpu()) == np.array(label.data.cpu())).sum()
                time_cur = (time.time() - since) / 100
                since = time.time()
                
                writer.add_scalar("Accuracy/train", correct / total , total_iters)
                writer.add_scalar("Loss/train", total_loss, total_iters)

                _print("Iters: {:0>6d}/[{:0>2d}], loss: {:.4f}, train_accuracy: {:.4f}, time: {:.2f} s/iter, learning rate: {}".format(total_iters, epoch, total_loss.item(), correct/total, time_cur, exp_lr_scheduler.get_last_lr()))

            # save model
            if total_iters % args.save_freq == 0:
                msg = 'Saving checkpoint: {}'.format(total_iters)
                _print(msg)
                if multi_gpus:
                    net_state_dict = net.module.state_dict()
                else:
                    net_state_dict = net.state_dict()

                if not os.path.exists(save_dir):
                    os.mkdir(save_dir)
                torch.save({
                    'iters': total_iters,
                    'net_state_dict': net_state_dict},
                    os.path.join(save_dir, 'Iter_%06d_net.ckpt' % total_iters))

            # test accuracy
            if total_iters % args.test_freq == 0:
                with torch.no_grad():
                    net.eval()
                    getFeatureFromTorch('./result/cur_epoch_lfw_result.mat', net, device, lfw_dataset, lfw_dataloader)
                    lfw_accuracy = evaluation_10_fold('./result/cur_epoch_lfw_result.mat')
                    lfw_accuracy = np.mean(lfw_accuracy) * 100             
                writer.add_scalar("Accuracy/test", lfw_accuracy, total_iters)
                _print(f'LFW Ave Accuracy: {lfw_accuracy.item():.4f}')
                if best_test_acc <= lfw_accuracy.item() :
                    best_test_acc = lfw_accuracy.item() 
                    best_test_iters = total_iters

                _print(f'Current Best Accuracy: test: {best_test_acc:.4f} in iters: {best_test_iters}')
                net.train()

    _print('Final Best Accuracy: val: {:.4f} in iters: {}'.format(best_test_acc, best_test_iters))
    print('finished training')
Example 32
def train(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir)
    init_log(dirs['log'])
    config_dir = args.config_dir
    copy_file(config_dir, dirs['data'])
    config = configparser.ConfigParser()
    config.read(config_dir)
    in_test, post_test = init_test_flag(args.test_mode)

    # init env
    env = init_env(config['ENV_CONFIG'])
    logging.info('Training: a dim %d, agent dim: %d' % (env.n_a, env.n_agent))

    # init step counter
    total_step = int(config.getfloat('TRAIN_CONFIG', 'total_step'))
    test_step = int(config.getfloat('TRAIN_CONFIG', 'test_interval'))
    log_step = int(config.getfloat('TRAIN_CONFIG', 'log_interval'))
    global_counter = Counter(total_step, test_step, log_step)

    # init centralized or multi agent
    seed = config.getint('ENV_CONFIG', 'seed')

    if env.agent == 'ia2c':
        model = IA2C(env.n_s_ls,
                     env.n_a,
                     env.neighbor_mask,
                     env.distance_mask,
                     env.coop_gamma,
                     total_step,
                     config['MODEL_CONFIG'],
                     seed=seed)
    elif env.agent == 'ia2c_fp':
        model = IA2C_FP(env.n_s_ls,
                        env.n_a,
                        env.neighbor_mask,
                        env.distance_mask,
                        env.coop_gamma,
                        total_step,
                        config['MODEL_CONFIG'],
                        seed=seed)
    elif env.agent == 'ma2c_nc':
        model = MA2C_NC(env.n_s,
                        env.n_a,
                        env.neighbor_mask,
                        env.distance_mask,
                        env.coop_gamma,
                        total_step,
                        config['MODEL_CONFIG'],
                        seed=seed)
    else:
        model = None

    # disable multi-threading for safe SUMO implementation
    summary_writer = tf.summary.FileWriter(dirs['log'])
    trainer = Trainer(env,
                      model,
                      global_counter,
                      summary_writer,
                      in_test,
                      output_path=dirs['data'])
    trainer.run()

    # save model
    final_step = global_counter.cur_step
    logging.info('Training: save final model at step %d ...' % final_step)
    model.save(dirs['model'], final_step)

    # post-training test
    if post_test:
        test_dirs = init_dir(base_dir, pathes=['eva_data'])
        evaluator = Evaluator(env, model, test_dirs['eva_data'])
        evaluator.run()