def main():

    # Parse the command line arguments
    args = parse_args()

    # Set up the message logger
    logger.setup_logger('db_mqtt_demo', args.logging)

    # Instantiate the logger and run
    sdl = SensorDbLogger(args)
    sdl.run()
Example #2
def main():

    # Parse the command line arguments
    args = parse_args()

    # Set up the message logger
    logger.setup_logger("db_mqtt_demo", args.logging)

    # Instantiate the logger and run
    rdl = RouterDbLogger(args)
    rdl.run()
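Most of the examples on this page import setup_logger from a project-local logger module that is not shown, and each project defines it with a slightly different signature. For reference, a minimal sketch of what such a helper commonly looks like (the name, logfile and level parameters here are assumptions, not any particular project's API):

import logging
import sys


def setup_logger(name="app", logfile=None, level=logging.INFO):
    """Create (or fetch) a named logger with a console handler and an optional file handler."""
    log = logging.getLogger(name)
    log.setLevel(level)
    formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")

    # Console output
    console = logging.StreamHandler(sys.stdout)
    console.setFormatter(formatter)
    log.addHandler(console)

    # Optional file output
    if logfile:
        filehandler = logging.FileHandler(logfile)
        filehandler.setFormatter(formatter)
        log.addHandler(filehandler)

    return log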
Example #3
import json
import logging.config

from kafka import KafkaConsumer

from logger import setup_logger

logging.config.dictConfig(setup_logger())
logger = logging.getLogger("app")


class Consumer(object):
    def __init__(self, topic, **kwargs):
        self.topic = topic
        self._consumer = None

        self.kwargs = kwargs
        self.kwargs['bootstrap_servers'] = self.kwargs.get(
            'bootstrap_servers', ['localhost:9092'])
        self.kwargs['api_version'] = self.kwargs.get('api_version', (
            0,
            10,
        ))

        self.connect()

    def __enter__(self):
        return self

    def __exit__(self, errortype, value, traceback):
        self.close()
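Because the class above defines __enter__ and __exit__, the intended usage (assumed; connect(), close() and the message loop are not part of this excerpt, and KafkaConsumer instances are iterable) is as a context manager:

# Hypothetical usage of the Consumer excerpt; the topic name and loop body are illustrative only.
with Consumer("events") as consumer:
    for message in consumer._consumer:
        logger.info("received: %s", message.value)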
Example #4
import logging
import os
from typing import Dict, List, Tuple

import gym
import numpy as np
import torch
import torch.nn.functional as F

import gym_tictactoe  # NOQA
from agent import Agent
from logger import setup_logger

from .network import AlphaZeroNetwork
from .config import AlphaZeroConfig

logger = setup_logger(__name__, logging.INFO)
# logger = setup_logger(__name__, logging.DEBUG, "AlphaZero.log")


class Node:
    def __init__(self, id: int, player: int):
        self.id = id
        self.player = player
        self.edges: List[Edge] = []

    def expand(self,
               actions: List[int],
               policy: List[float],
               next_id: int = 1):
        actions = np.random.permutation(actions)  # is it really necessary to randomize here?
        for i, action in enumerate(actions):
Example #5
from logger import setup_logger
from plot import plot_signal
from settings import *

logger = setup_logger()
Example #6
def main():
    #argparse settings
    parser = argparse.ArgumentParser(
        description='PyTorch RadioML Example')  #400 and 0.001
    parser.add_argument('--batchsize',
                        type=int,
                        default=400,
                        metavar='N',
                        help='input batch size for training (default: 400)')
    parser.add_argument('--test_batchsize',
                        type=int,
                        default=400,
                        metavar='N',
                        help='input batch size for testing (default: 400)')
    parser.add_argument('--epochs',
                        type=int,
                        default=100,
                        metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.03,
                        metavar='LR',
                        help='learning rate (default: 0.03)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        metavar='M',
                        help='Adam momentum (default: 0.9)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument(
        '--data-dir',
        type=str,
        default="/home/yifei/complex_demo/data/RML2016.10a_dict.pkl",
        metavar='N',
        help='where data is stored')
    parser.add_argument('--train-id',
                        type=str,
                        default="/home/yifei/complex_demo/data/train_idx.npy",
                        metavar='N',
                        help='where train ids are stored')
    parser.add_argument('--test-id',
                        type=str,
                        default="/home/yifei/complex_demo/data/test_idx.npy",
                        metavar='N',
                        help='where test ids are stored')

    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    model = ManifoldNetComplex().to(device)
    #     model.load_state_dict(torch.load(save_path))

    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    print("#Model Parameters: " + str(params))
    train_loader, test_loader, lbl, snrs, test_idx = data_prep(
        args.data_dir, args.train_id, args.test_id, args.batchsize,
        args.test_batchsize)
    print("Batch Size: " + str(args.batchsize))
    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           eps=1e-8,
                           amsgrad=True)
    print("Learning Rate: " + str(args.lr))

    try:
        os.mkdir('./log')
    except FileExistsError:
        pass

    # For model saving purposes, initialized as 0.
    # If accuracy is higher than "highest", the model is saved.
    highest = 0

    # The actual path to save
    save_path = None

    try:
        os.mkdir('./save')
    except FileExistsError:
        pass

    logger = setup_logger('MSTAR logger')
    logger.info(model)
    batches = [200, 400, 800]
    lrs = [0.03, 0.05, 0.005, 0.08, 0.1, 0.005, 0.001]

    batches = [100]
    lrs = [0.02]

    ws = [0, 0.5, 1, 5, 10]

    for w in ws:
        model = ManifoldNetComplex(5, w).cuda()

        logger.info(w)

        for i in batches:
            train_loader, test_loader, lbl, snrs, test_idx = data_prep(
                args.data_dir, args.train_id, args.test_id, i,
                args.test_batchsize)
            logger.info("Batch Size: " + str(i))
            for j in lrs:
                optimizer = optim.Adam(model.parameters(),
                                       lr=j,
                                       eps=1e-8,
                                       amsgrad=True)
                logger.info("Learning Rate: " + str(args.lr))
                for epoch in range(1, args.epochs + 1):

                    acc = test(args, model, device, test_loader, lbl, snrs,
                               test_idx, logger)
                    if acc > highest:
                        if save_path is not None:
                            try:
                                os.remove(save_path + '.ckpt')
                            except OSError:
                                pass
                        highest = acc
                        save_path = os.path.join(
                            './save/',
                            '[{acc}]-[{batch}]-[{learning_rate}]-11class-model'
                            .format(acc=np.round(acc, 3),
                                    batch=i,
                                    learning_rate=j))
                        torch.save(model.state_dict(), save_path + '.ckpt')
                        logger.info(
                            'Saved model checkpoints into {}...'.format(
                                save_path))

                    train(args, model, device, train_loader, optimizer, epoch,
                          logger)

                logger.info("########## NEW MODEL ###########")
                model = ManifoldNetComplex().cuda()
Example #7
import pandas as pd
import tensorflow as tf
import tensorflow_probability as tfp
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tqdm import tqdm

from logger import setup_logger
from util import save_image

log = setup_logger(__name__)


def get_optimizer(name, learning_rate):
    tfk = tf.keras
    optimizers = {
        "adam": tfk.optimizers.Adam(learning_rate),
        "nadam": tfk.optimizers.Nadam(learning_rate),
        "adamax": tfk.optimizers.Adamax(learning_rate),
    }

    log.info(f"Using optimizer {optimizers[name]}")

    return optimizers[name]


def create_conditional_model(config, image_shape, label_shape=()):
    # Create the model
    tfd = tfp.distributions
    tfk = tf.keras
    tfkl = tf.keras.layers
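One design note on the get_optimizer helper above: the dictionary instantiates all three Keras optimizers even though only one is returned. A leaner variant (a sketch, not the original project's code) maps names to classes and constructs only the one requested:

def get_optimizer_lazy(name, learning_rate):
    tfk = tf.keras
    optimizer_classes = {
        "adam": tfk.optimizers.Adam,
        "nadam": tfk.optimizers.Nadam,
        "adamax": tfk.optimizers.Adamax,
    }
    # Only the selected optimizer is instantiated.
    return optimizer_classes[name](learning_rate)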
Example #8

import os
import sys

import falcon

from settings import settings
from logger import setup_logger
from middleware import RequireJSON
from middleware import JSONTranslator
from middleware import AuthMiddleware
from middleware import CrossAllowOrigin
from routing import generate_routes

# development: 'gunicorn api:app --reload'
# production: 'gunicorn -w3 --certfile=server.crt --keyfile=server.key api:app'
sys.path.append(os.path.abspath('.'))


setup_logger(settings['log'])

app = falcon.API(middleware=[
    CrossAllowOrigin(),
    RequireJSON(),
    JSONTranslator(),
    AuthMiddleware(settings),
])

generate_routes(app, settings)





Example #9
parser.add_argument('--split_file',
                    default='Kitti/object/train.txt',
                    help='save model')
parser.add_argument('--btrain', type=int, default=4)
parser.add_argument('--start_epoch', type=int, default=1)

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

if not os.path.isdir(args.savemodel):
    os.makedirs(args.savemodel)
print(os.path.join(args.savemodel, 'training.log'))
log = logger.setup_logger(os.path.join(args.savemodel, 'training.log'))
import datetime
log.info(datetime.datetime.now())

all_left_img, all_right_img, all_left_disp, = ls.dataloader(
    args.datapath, args.split_file)
all_left_img_v, all_right_img_v, all_left_disp_v, = ls.dataloader(
    args.datapath, args.split_file.replace('train', 'val'))

TrainImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
    all_left_img, all_right_img, all_left_disp, True),
                                             batch_size=args.btrain,
                                             shuffle=True,
                                             num_workers=14,
                                             drop_last=False)
TestImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
Example #10
        self.p4g_main_send_lock = threading.RLock()

        self.gpsStableLedLock = threading.RLock()
        self.gpsLedLock = threading.RLock()
        self.laserLedLock = threading.RLock()
        self.gyroLedLock = threading.RLock()


my_lock = MyLock()


def gl_init():
    global _global_dict
    _global_dict = {}


def set_value(name, value):
    _global_dict[name] = value


def get_value(name, defValue=0):
    try:
        return _global_dict[name]
    except KeyError:
        return defValue


from logger import setup_logger
from color import *
log = setup_logger('logging.log')
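A short usage sketch for the module-level dictionary helpers above (the key names are illustrative): gl_init() must run once before any set_value/get_value call, since both assume _global_dict already exists.

gl_init()
set_value('gps_ready', True)
print(get_value('gps_ready'))        # True
print(get_value('laser_ready', -1))  # -1, falls back to the default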
Example #11
from datetime import datetime
from time import sleep

from github import Github

from data_gathering import logger_config_values
from logger import setup_logger

logger = setup_logger(name=__name__,
                      file=logger_config_values['file'],
                      format=logger_config_values['format'],
                      level=logger_config_values['level'])


class NoAPICalls(Exception):
    pass


def wait_for_api_calls(git: Github, number_of_attempts: int = 3) -> None:
    for i in range(number_of_attempts):
        waiting_time = time_to_wait(timestamp=git.rate_limiting_resettime) + 30
        logger.info(msg=f'Waiting for {waiting_time} seconds')
        sleep(waiting_time)

        api_calls = git.get_rate_limit().core.remaining
        if api_calls > 0:
            logger.debug(msg=f'Available {api_calls} API calls')
            return
        else:
            logger.debug(msg=f'No API calls received in attempt {i}/'
                         f'{number_of_attempts}')
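A minimal usage sketch for wait_for_api_calls (the token is a placeholder and the surrounding script is assumed): check the remaining core quota before a batch of requests and block until the limit resets.

git = Github("<token>")  # placeholder credentials
if git.get_rate_limit().core.remaining == 0:
    wait_for_api_calls(git, number_of_attempts=3)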
Example #12
    arg('--ricap', action='store_true', help='use ricap')

    arg('--pseudo_file', type=str, default='pseudo_99.csv')

    arg('--dataset_type',
        type=str,
        default='pillow',
        help='choose from [pillow,cv2].')

    args = parser.parse_args()

    IMAGE_FOLDER = args.root + '/images/'
    os.makedirs(args.run_root, exist_ok=True)
    N_CLASSES = 4

    logger = setup_logger("plant pathology", args.run_root, 0)
    logger.info(args)

    if args.cutmix:
        # print('=> using cutmix.')
        logger.info('=> using cutmix.')

    if args.mixup:
        # print('=> using mixup.')
        logger.info('=> using mixup.')

    if args.specific_mixup:
        # print('=> using specific_mixup.')
        logger.info('=> using specific_mixup.')

    if args.ricap:
Example #13
    if not isinstance(env.action_space, gym.spaces.Box) \
            or not isinstance(env.observation_space, gym.spaces.Box):
        raise ValueError("SAC algorithm only works in environments with "
                         "continuous observation-action spaces.")

    expt_variant = {
        'algo_name': 'sac',
        'algo_params': algo_hyperparams,
        'env_name': args.env_name,
    }

    log_dir = setup_logger(
        exp_prefix='sac',
        seed=args.seed,
        variant=expt_variant,
        snapshot_mode='last',
        snapshot_gap=10,
        log_dir='training_logs',
        log_stdout=not args.no_log_stdout,
    )

    sac = SAC(env, **algo_hyperparams)

    # Training process
    expected_accum_rewards = sac.train()

    # Plot the expected accum. rewards obtained during the learning process
    plt.plot(expected_accum_rewards)
    plt.show(block=False)
    plt.savefig('expected_accum_rewards.png')
Example #14
File: train_wrn.py  Project: yyht/HDGE
        overwrite.exp_prefix = exp_prefix
        overwrite.log_dir = f"{dirname(dirname(abspath(args.load_path)))}/{exp_prefix}"

        # You may want to change the seed and SGLD steps when restarting after a crash;
        # helps stabilize log q(y|x) + log q(x) and log q(y|x) + log q(x|y) + log q(x)
        overwrite.seed = args.seed
        overwrite.n_steps = args.n_steps
        overwrite.start_epoch = args.start_epoch if args.start_epoch > 0 else overwrite.start_epoch
        overwrite.warmup_iters = args.warmup_iters
        overwrite.workers = args.workers
        args = overwrite
    else:
        exp_prefix = f"{args.id}-{uuid.uuid4().hex}"
        args.exp_prefix = exp_prefix
        args.log_dir = f"{args.log_dir}/{exp_prefix}"

    args.plot_contrast = 1 if (args.pxycontrast > 0
                               and args.plot_contrast) else 0
    args.n_classes = 100 if args.dataset == "cifar100" else 10
    set_seed(args.seed)
    os.makedirs(args.log_dir, exist_ok=True)
    configs = OrderedDict(sorted(vars(args).items(), key=lambda x: x[0]))
    setup_logger(exp_prefix=args.exp_prefix,
                 variant=configs,
                 log_dir=args.log_dir)
    with open(f"{args.log_dir}/params.txt", "w") as f:
        json.dump(args.__dict__, f)
    sys.stdout = open(f"{args.log_dir}/log.txt", "a")

    main(args)
Example #15
"""

__author__ = "Robert Young"
__version__ = "0.1.0"
__license__ = "GPL3"

import argparse
from urllib import parse, request
import pathlib
from subprocess import check_call, getoutput

from bs4 import BeautifulSoup

from logger import setup_logger

logger = setup_logger(logfile="log.txt")

PDF_DIR = pathlib.Path('pdfs')
VIDEO_DIR = pathlib.Path('videos')


def get_links(url):
    """Get pdf and youtube links from page."""
    url_base = url[:-len('lec.html')] if url.endswith('lec.html') else url

    html = request.urlopen(url)
    logger.info("Reading html")
    soup = BeautifulSoup(html.read(), 'lxml')
    links = soup.find_all('a')

    doc_links = [url_base + link['href']
Example #16
batch_size = 32
fold_id = 0
epochs = 20
EXP_ID = "exp11_seres"
model_path = None
n_tta = 4
save_path = '{}_fold{}.pth'.format(EXP_ID, fold_id)
npy_path = [
    "../input/exp4-kaggle-days-multi-seres/y_pred.npy",
    "../input/kaggle-days-exp4-fold1/y_pred.npy",
    "../input/kaggle-days-exp4-fold2/y_pred.npy",
    "../input/exp4kaggledaysmultiseres-fold3-result/y_pred.npy",
    "../input/exp4kaggledaysmultiseres-fold4-result/y_pred.npy",
]

setup_logger(out_file=LOGGER_PATH)
seed_torch(SEED)
LOGGER.info("seed={}".format(SEED))


@contextmanager
def timer(name):
    t0 = time.time()
    yield
    LOGGER.info('[{}] done in {} s'.format(name, round(time.time() - t0, 2)))


def main():
    with timer('load data'):
        df = pd.read_csv(TRAIN_PATH)
        df["loc_x"] = df["loc_x"] / 100
Example #17
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import novaclient.client as nova
from novaclient.exceptions import *
import logger
import socket
import time
import os

LOGGER = logger.setup_logger("instances")

def get_nova_client(config=os.environ):
    LOGGER.info("Initializing a new OpenStack client")
    client = nova.Client(2,
        config['OS_USERNAME'],
        config['OS_PASSWORD'],
        project_id=config['OS_TENANT_NAME'],
        auth_url=config['OS_AUTH_URL'],
        region_name=config['OS_REGION_NAME']
    )
    return client

def check_pubkey(name, path, client=None):
    LOGGER.info("Checking for existance of the public key %s", name)
    if not client:
        client = get_nova_client()
    try:
        key = client.keypairs.find(name=name)
        LOGGER.debug("Key %s exists", name)
        with open(path, 'r') as keyfile:
Example #18
    fig.tight_layout()
    fig.savefig('temp' + str(acc) + '.png', dpi=fig.dpi)
    fig.savefig('temp' + str(acc) + '.eps', dpi=fig.dpi, format='eps')


# Parameters for data loading
params_train = {'shuffle': False, 'num_workers': 1}

params_val = {
    'batch_size': Params_dict['test_batch'],
    'shuffle': False,
    'num_workers': 1
}

max_epochs = Params_dict['max_epochs']
logger = setup_logger('JS logger')
model_name = 'MSTAR'
logger.info(model_name)
logger.info(str(Params_dict))
batches = Params_dict['batches']
lrs = Params_dict['lrs']

batches = [100, 200, 300, 500]
lrs = [0.008, 0.01]
save_path = None
torch.manual_seed(42222222)
np.random.seed(42222222)
# np.random.seed(42222222)
distr = [5]
for ss in distr:
    for b in batches:
Example #19
def train():
    args = parse_args()
    dist.init_process_group(
                backend='nccl',
                world_size=torch.cuda.device_count()
                )
    local_rank = torch.distributed.get_rank()
    torch.cuda.set_device(local_rank)
    device = torch.device("cuda", local_rank)                
    setup_logger(respth)

    # dataset
    n_classes = 19
    n_img_per_gpu = 8
    n_workers = 4
    cropsize = [1024, 1024]
    ds = CityScapes('../data/cityscapes', cropsize=cropsize, mode='train')
    sampler = torch.utils.data.distributed.DistributedSampler(ds)
    dl = DataLoader(ds,
                    batch_size=n_img_per_gpu,
                    sampler=sampler,
                    shuffle=False,
                    num_workers=n_workers,
                    pin_memory=True,
                    drop_last=True)

    logger.info('successfully loaded data')

    ignore_idx = 255
    net = AttaNet(n_classes=n_classes)
    if args.ckpt is not None:
        net.load_state_dict(torch.load(args.ckpt, map_location='cpu'))
        logger.info('successfully loaded weights')
    net.cuda(device)
    net.train()
    net = torch.nn.parallel.DistributedDataParallel(net, find_unused_parameters=True,
                                                    device_ids=[local_rank],
                                                    output_device=local_rank)
    logger.info('successfully set up distributed training')
    score_thres = 0.7
    n_min = cropsize[0]*cropsize[1]//2
    criteria_p = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)
    criteria_aux1 = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)
    criteria_aux2 = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)

    # optimizer
    momentum = 0.9
    weight_decay = 5e-4
    lr_start = 1e-2
    max_iter = 200000
    power = 0.9
    warmup_steps = 1000
    warmup_start_lr = 1e-5
    optim = Optimizer(
            model=net.module,
            lr0=lr_start,
            momentum=momentum,
            wd=weight_decay,
            warmup_steps=warmup_steps,
            warmup_start_lr=warmup_start_lr,
            max_iter=max_iter,
            power=power)

    # train loop
    msg_iter = 50
    loss_avg = []
    st = glob_st = time.time()
    diter = iter(dl)
    epoch = 0
    for it in range(max_iter):
        try:
            im, lb = next(diter)
            if im.size()[0] != n_img_per_gpu: raise StopIteration
        except StopIteration:
            epoch += 1
            sampler.set_epoch(epoch)
            diter = iter(dl)
            im, lb = next(diter)
        im = im.cuda()
        lb = lb.cuda()
        H, W = im.size()[2:]
        lb = torch.squeeze(lb, 1)

        optim.zero_grad()
        out, out16, out32 = net(im)
        lossp = criteria_p(out, lb)
        loss1 = criteria_aux1(out16, lb)
        loss2 = criteria_aux2(out32, lb)
        loss = lossp + loss1 + loss2
        loss.backward()
        optim.step()

        loss_avg.append(loss.item())
        # print training log message
        if (it+1) % msg_iter == 0:
            loss_avg = sum(loss_avg) / len(loss_avg)
            lr = optim.lr
            ed = time.time()
            t_intv, glob_t_intv = ed - st, ed - glob_st
            eta = int((max_iter - it) * (glob_t_intv / it))
            eta = str(datetime.timedelta(seconds=eta))
            msg = ', '.join([
                    'it: {it}/{max_it}',
                    'lr: {lr:4f}',
                    'loss: {loss:.4f}',
                    'eta: {eta}',
                    'time: {time:.4f}',
                ]).format(
                    it=it+1,
                    max_it=max_iter,
                    lr=lr,
                    loss=loss_avg,
                    time=t_intv,
                    eta=eta
                )
            logger.info(msg)
            loss_avg = []
            st = ed

    save_pth = osp.join(args.snapshot_dir, 'model_final.pth')
    net.cpu()
    state = net.module.state_dict() if hasattr(net, 'module') else net.state_dict()
    if dist.get_rank() == 0:
        torch.save(state, save_pth)
    logger.info('training done, model saved to: {}'.format(save_pth))
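The Optimizer wrapper used above is project-specific and not shown; judging from its warmup_steps, warmup_start_lr, max_iter and power arguments, the schedule it implements is a linear warmup followed by polynomial decay. A standalone sketch of that schedule under those assumptions, using the hyperparameters set above:

def poly_warmup_lr(it, lr0=1e-2, warmup_start_lr=1e-5, warmup_steps=1000,
                   max_iter=200000, power=0.9):
    """Linear warmup to lr0, then polynomial decay towards 0 at max_iter (assumed schedule)."""
    if it < warmup_steps:
        return warmup_start_lr + (lr0 - warmup_start_lr) * it / warmup_steps
    return lr0 * (1 - it / max_iter) ** power

# Applied manually to a plain torch.optim.SGD optimizer, it would be:
#   for group in sgd_optimizer.param_groups:
#       group['lr'] = poly_warmup_lr(it)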
Example #20
        print('eval done')

        time.sleep(5)


# Main
def main():
    #log
    time.sleep(1)


if __name__ == '__main__':

    # Logger setup
    logger = setup_logger("master", "fatman_master.log", logging.DEBUG)

    # Parser
    config = ConfigParser()

    # Updated config flag
    conf_data = init_enviroment_config(config)

    conf_data = {
        "temp": "0.0",
        "tempUnits": "C",
        "humidity": "60.0",
        "elapsed": "0",
        "mode": "auto",
        "fan_state": "on",
        "light_state": "on",
Example #21
import sys
from os import sep
from logger import logger, setup_logger
from ga_path_generator import Genetic
from Maze import Maze
from a_star import a_star
from errors import NoEndPositionError

try:
    filename = sys.argv[1]
    setup_logger(filename)
    g = Genetic(*sys.argv[1:])
except (TypeError, IndexError) as e:
    print(e)
    print(
        'Uso: {} {} arquivo_de_labirinto tamanho_populacao [tamanho_cromossomo] [taxa_mutacao]'
        .format(sys.executable.split(sep)[-1], sys.argv[0]))
    quit()

solver = g.run()
m = Maze(filename, solver[1].path, True, 4)
c = m.walk()
print()
print('Geração: {}'.format(solver[0]))
print('Caminho: {}'.format(solver[1].positions))
print('Score: {}'.format(solver[1].score))
print('Saída: {}'.format(solver[1].solution))
print('Tempo executando: {:.3f} segundos'.format(g.elapsed_time))
print()
logger.info('Tempo executando: {:.3f} segundos'.format(g.elapsed_time))
logger.info('')
Example #22
def arg_parse():
    parser = argparse.ArgumentParser()
    parser.add_argument("--network",
                        help="Hidden layer activation function",
                        choices=['keras', 'mantas'],
                        type=str,
                        default='mantas')
    parser.add_argument("--data",
                        help="Hidden layer activation function",
                        choices=['mackey', 'wind', 'weather', 'chest', 'sine'],
                        type=str,
                        default='mackey')
    return parser.parse_args()


log = logger.setup_logger(__name__)


def main():
    config = arg_parse()
    log.info("Starting...")
    log.info("An echo state network will be run on the following dataset:")
    log.info(config.data)

    input_data = create_dataset(config.data)

    if config.network == 'mantas':
        echo_state_network(input_data, 9, 1000)
        # echo_state_network(input_data, .006, 5000)
        # for i in range(5,10):
        #     echo_state_network(input_data, i, 1000)
Example #23
def main(experiment_name,
         marked_images_directory,
         optimizer,
         lr_scheduler=None,
         epochs=150,
         batch_size=512,
         num_workers=1):
    """ 
    Basically a straight copy of our resnet18_on_cifar10.py example. Only difference is we make use
    of a training dataset with certain examples replaced by marked alternatives.
    Just run tensorboard from the same directory to see training results.
    """

    output_directory_root = "experiments/radioactive"
    output_directory = os.path.join(output_directory_root, experiment_name)
    if not os.path.isdir(output_directory):
        os.makedirs(output_directory, exist_ok=True)

    # Setup regular log file
    logfile_path = os.path.join(output_directory, "logfile.txt")
    setup_logger(logfile_path)

    # Setup TensorBoard logging
    tensorboard_log_directory = os.path.join("runs", experiment_name)
    shutil.rmtree(tensorboard_log_directory, ignore_errors=True)
    tensorboard_summary_writer = SummaryWriter(
        log_dir=tensorboard_log_directory)

    # Choose Training Device
    use_cuda = torch.cuda.is_available()
    logger.info(f"CUDA Available? {use_cuda}")
    device = "cuda" if use_cuda else "cpu"

    # Datasets and Loaders
    train_set_loader, test_set_loader = get_data_loaders(
        marked_images_directory, batch_size, num_workers)

    # Create Model & Optimizer
    model = torchvision.models.resnet18(pretrained=False, num_classes=10)
    model.to(device)
    optimizer = optimizer(model.parameters())
    if lr_scheduler:
        lr_scheduler = lr_scheduler(optimizer)

    logger.info("=========== Commencing Training ===========")
    logger.info(f"Epoch Count: {epochs}")
    logger.info(f"Batch Size: {batch_size}")

    # Load Checkpoint
    checkpoint_file_path = os.path.join(output_directory, "checkpoint.pth")
    start_epoch = 0
    if os.path.exists(checkpoint_file_path):
        logger.info("Checkpoint Found - Loading!")

        checkpoint = torch.load(checkpoint_file_path)
        logger.info(f"Last completed epoch: {checkpoint['epoch']}")
        logger.info(f"Average Train Loss: {checkpoint['train_loss']}")
        logger.info(f"Top-1 Train Accuracy: {checkpoint['train_accuracy']}")
        logger.info(f"Top-1 Test Accuracy: {checkpoint['test_accuracy']}")
        start_epoch = checkpoint["epoch"] + 1
        logger.info(f"Resuming at epoch {start_epoch}")

        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        if lr_scheduler:
            lr_scheduler.load_state_dict(checkpoint["lr_scheduler_state_dict"])
    else:
        logger.info("No checkpoint found, starting from scratch.")

    # Training Loop
    t = Timer()
    for epoch in range(start_epoch, epochs):
        t.start()
        logger.info("-" * 10)
        logger.info(f"Epoch {epoch}")
        logger.info("-" * 10)

        # Train
        train_loss, train_accuracy = train_model(device, model,
                                                 train_set_loader, optimizer)
        tensorboard_summary_writer.add_scalar("train_loss", train_loss, epoch)
        tensorboard_summary_writer.add_scalar("train_accuracy", train_accuracy,
                                              epoch)

        # Test
        test_accuracy = test_model(device, model, test_set_loader, optimizer)
        tensorboard_summary_writer.add_scalar("test_accuracy", test_accuracy,
                                              epoch)

        scheduler_dict = None
        if lr_scheduler:
            lr_scheduler.step()
            scheduler_dict = lr_scheduler.state_dict()

        # Save Checkpoint
        logger.info("Saving checkpoint.")
        torch.save(
            {
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'lr_scheduler_state_dict': scheduler_dict,
                'train_loss': train_loss,
                'train_accuracy': train_accuracy,
                'test_accuracy': test_accuracy
            }, checkpoint_file_path)

        elapsed_time = t.stop()
        logger.info(f"End of epoch {epoch}, took {elapsed_time:0.4f} seconds.")
        logger.info(f"Average Train Loss: {train_loss}")
        logger.info(f"Top-1 Train Accuracy: {train_accuracy}")
        logger.info(f"Top-1 Test Accuracy: {test_accuracy}")
        logger.info("")
Example #24
def train():
    args = parse_args()
    torch.cuda.set_device(args.local_rank)
    dist.init_process_group(backend='nccl',
                            init_method='tcp://127.0.0.1:33241',
                            world_size=torch.cuda.device_count(),
                            rank=args.local_rank)
    setup_logger(respth)

    ## dataset
    n_classes = 19
    n_img_per_gpu = 8
    n_workers = 4
    cropsize = [1024, 1024]
    ds = CityScapes('./data', cropsize=cropsize, mode='train')
    sampler = torch.utils.data.distributed.DistributedSampler(ds)
    dl = DataLoader(ds,
                    batch_size=n_img_per_gpu,
                    shuffle=False,
                    sampler=sampler,
                    num_workers=n_workers,
                    pin_memory=True,
                    drop_last=True)

    ## model
    ignore_idx = 255
    net = BiSeNet(n_classes=n_classes)
    net.cuda()
    net.train()
    net = nn.parallel.DistributedDataParallel(net,
                                              device_ids=[
                                                  args.local_rank,
                                              ],
                                              output_device=args.local_rank)
    score_thres = 0.7
    n_min = n_img_per_gpu * cropsize[0] * cropsize[1] // 16
    LossP = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)
    Loss2 = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)
    Loss3 = OhemCELoss(thresh=score_thres, n_min=n_min, ignore_lb=ignore_idx)

    ## optimizer
    momentum = 0.9
    weight_decay = 5e-4
    lr_start = 1e-2
    max_iter = 80000
    power = 0.9
    warmup_steps = 1000
    warmup_start_lr = 1e-5
    optim = Optimizer(model=net.module,
                      lr0=lr_start,
                      momentum=momentum,
                      wd=weight_decay,
                      warmup_steps=warmup_steps,
                      warmup_start_lr=warmup_start_lr,
                      max_iter=max_iter,
                      power=power)

    ## train loop
    msg_iter = 50
    loss_avg = []
    st = glob_st = time.time()
    diter = iter(dl)
    epoch = 0
    for it in range(max_iter):
        try:
            im, lb = next(diter)
            if im.size()[0] != n_img_per_gpu: raise StopIteration
        except StopIteration:
            epoch += 1
            sampler.set_epoch(epoch)
            diter = iter(dl)
            im, lb = next(diter)
        im = im.cuda()
        lb = lb.cuda()
        H, W = im.size()[2:]
        lb = torch.squeeze(lb, 1)

        optim.zero_grad()
        out, out16, out32 = net(im)
        lossp = LossP(out, lb)
        loss2 = Loss2(out16, lb)
        loss3 = Loss3(out32, lb)
        loss = lossp + loss2 + loss3
        loss.backward()
        optim.step()

        loss_avg.append(loss.item())
        ## print training log message
        if (it + 1) % msg_iter == 0:
            loss_avg = sum(loss_avg) / len(loss_avg)
            lr = optim.lr
            ed = time.time()
            t_intv, glob_t_intv = ed - st, ed - glob_st
            eta = int((max_iter - it) * (glob_t_intv / it))
            eta = str(datetime.timedelta(seconds=eta))
            msg = ', '.join([
                'it: {it}/{max_it}',
                'lr: {lr:4f}',
                'loss: {loss:.4f}',
                'eta: {eta}',
                'time: {time:.4f}',
            ]).format(it=it + 1,
                      max_it=max_iter,
                      lr=lr,
                      loss=loss_avg,
                      time=t_intv,
                      eta=eta)
            logger.info(msg)
            loss_avg = []
            st = ed

    ## dump the final model
    save_pth = osp.join(respth, 'model_final_diss.pth')
    net.cpu()
    state = net.module.state_dict() if hasattr(net,
                                               'module') else net.state_dict()
    if dist.get_rank() == 0: torch.save(state, save_pth)
    logger.info('training done, model saved to: {}'.format(save_pth))
Example #25
from __future__ import print_function
import logging

from logger import setup_logger
from proxmox import Proxmox
from confluence import ConfluenceClient

setup_logger()
logging.getLogger("proxmoxer.core").setLevel(logging.ERROR)
logger = logging.getLogger(__name__)

if __name__ == '__main__':
    proxmox = Proxmox()
    client = ConfluenceClient()
    results = proxmox.get_stats()
    client.put_results(results)
    client.close()
Example #26
        cor_avg += pearsonr(data_values[:, i], reconstructed_data[:, i])[0]
    cor_avg /= data.shape[1]
    print(cor_avg)
    print(data.shape[1])

    pd.DataFrame(data=encoded_data.detach().numpy().T,
                 columns=data.index).to_csv(
                     "output4/autoencoder/tumor/data/encoded_5.csv")
    # pd.DataFrame(data=reconstructed_data.T, columns=data.index).to_csv(
    #     "output4/autoencoder/tumor/data/reconstructed_3.csv")
    # path = "output4/autoencoder/data/encoded_data_2"
    # pd.DataFrame(data=encoded_data, columns=data.colums)


if __name__ == "__main__":
    logger = lg.setup_logger("autoencoder")
    tumor_output_reduction()
    # expression_executor = get_expression_dependency_executor(epochs_source=range(10, 201, 10), hd=[500, 500])
    # expression_executor.run_source_test()
    # dependency_executor = get_expression_dependency_executor(epochs_target=range(10, 201, 10), hd=[300, 300])
    # dependency_executor.run_target_test()
    # tumor_reduction(hd=[2000, 4], epochs=range(10, 501, 10), name="second")
    # tumor_output_reduction()
    # tumor_reduction(hd=[2000, 3], epochs=range(10, 501, 10), name="second")
    # tumor_reduction(hd=[2000, 1], epochs=range(10, 201, 10), name="second")
    # drug_exe = get_drug_executor()
    # drug_exe.run_test()
    # test_encoders_dep(epochs_target=range(10, 201, 10), skip_source=True)
    # test_encoders()
    # save_encoders_program()
    # cca_between_encoded("output/cca/sklearn500")
Example #27
def main():
    """Main Entry point for {{cookiecutter.project_name}}"""
    log = logger.setup_logger("{{ cookiecutter.project_slug }}", "{{ cookiecutter.project_slug }}.log")
Example #28
def main():
    logger = setup_logger('manifold Dual Glow from DTI to ODF')
    parser = argparse.ArgumentParser()
    """path to store input/ output file"""
    parser.add_argument("--dset",
                        type=str,
                        default='../HCP_processed/',
                        required=False)
    """args for training"""
    parser.add_argument("--num-epochs",
                        help="number of epochs",
                        type=int,
                        default=10000,
                        required=False)
    parser.add_argument("--learning-rate",
                        "-lr",
                        help="learning rate of the model",
                        type=float,
                        default=1e-6,
                        required=False)
    parser.add_argument("--batch-size",
                        "-b",
                        help="batch size of training samples",
                        type=int,
                        default=32,
                        required=False)
    parser.add_argument("--best-err",
                        "-err",
                        type=float,
                        default=1000,
                        required=False)

    args = parser.parse_args()
    logger.info("call with args: \n{}".format(pprint.pformat(vars(args))))
    logger.info("model info {}".format(model.info))

    ####################################################################################################################
    """load train and test set"""
    train_dataset = Dataset_GLOW(data_path=args.dset, train=True)
    train_dataloader = DataLoaderX(train_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=4)

    test_dataset = Dataset_GLOW(data_path=args.dset, train=False)
    test_dataloader = DataLoaderX(test_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=False,
                                  num_workers=4)

    print('finish loading the data')

    Model = model.Glow().cuda()
    cudnn.benchmark = True
    Model = torch.nn.DataParallel(Model)
    print('finish init the model')

    optimizer = torch.optim.Adam(Model.parameters(),
                                 lr=args.learning_rate,
                                 weight_decay=0.001,
                                 amsgrad=True)

    print('# params', sum(x.numel() for x in Model.parameters()))

    # load the pre-trained checkpoint
    # can be commented out to train from scratch
    checkpoints = '../models/DTI2ODF-3D-pretrain.pth'
    ckpts = torch.load(checkpoints)
    Model.load_state_dict(ckpts['model_state_dict'])
    optimizer.load_state_dict(ckpts['optimizer_state_dict'])
    del ckpts

    ####################################################################################################################

    best_err = args.best_err
    now_best_err = 1000
    with torch.autograd.set_detect_anomaly(True):
        for epoch in range(args.num_epochs):
            b = 0

            Model.train()
            for data in train_dataloader:
                dti, eig, odf, msk = data
                odf = odf.unsqueeze(1).cuda()  # [B, 1, 32, 32, 32, 362]
                dti = dti.unsqueeze(1).cuda()  # [B, 1, 32, 32, 32, 3, 3]
                eig = eig.unsqueeze(1).cuda()  # [B, 1, 32, 32, 32, 3]
                msk = msk.unsqueeze(1).cuda()

                logdet, logpz, odff, dtii, eigg = Model(odf, dti, eig)

                loss = logdet + logpz * 0.001  # NOTE
                loss = -loss.mean()
                odf_, dti_, eig_ = Model(odff,
                                         dtii,
                                         eigg,
                                         extra=8,
                                         reverse=True)

                edti = (msk.unsqueeze(-1).unsqueeze(-1) *
                        ((dti - dti_)**2)).sum()
                eodf = (msk.unsqueeze(-1) * ((odf - odf_)**2)).sum()
                eeig = (msk.unsqueeze(-1) * ((eig - eig_)**2)).sum()

                logger.info(
                    "Epoch [{}/{}], Iter [{}] Loss: {:.3f}  ldet: {:.3f}  lz: {:.3f} reconstruct: dti {:.3f} odf {:.3f} eig {:.3f}"
                    .format(epoch + 1, args.num_epochs, b + 1, loss.item(),
                            -logdet.mean().item(), -logpz.mean().item(),
                            edti.item(), eodf.item(), eeig.item()))

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                b += 1

            Model.eval()
            errs = 0
            count = 0
            with torch.no_grad():
                for data in train_dataloader:
                    dti, eig, odf, msk = data
                    odf = odf.unsqueeze(1).cuda()  # [B, 1, 32, 32, 32, 362]
                    dti = dti.unsqueeze(1).cuda()  # [B, 1, 32, 32, 32, 3, 3]
                    eig = eig.unsqueeze(1).cuda()  # [B, 1, 32, 32, 32, 3]
                    msk = msk.unsqueeze(1).cuda()

                    rodf = Model(dti, eig, None, reconstruct=True)

                    err = (msk.unsqueeze(-1) * (rodf - odf)**2).sum() / (
                        msk.sum())  # NOTE hard code
                    err = err.sum()
                    errs += err
                errs = errs / len(train_dataset)
                logger.info(
                    "Epoch [{0}/{1}], Reconstruction Loss: {2:.3f}".format(
                        epoch + 1, args.num_epochs, errs.item()))

            if errs < now_best_err:
                now_best_err = errs

            if errs < best_err:
                best_err = errs

                torch.save(
                    {
                        'model_state_dict': Model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                    }, '../models/{}-{}.pth'.format(model.info, errs.item()))
Example #29
from logger import setup_logger

from asyncio.tasks import gather
import os

logger = setup_logger(logger_name=os.path.basename(__file__).split(".")[0])


async def buffered_gather(promises_array: list) -> list:

    """Buffered Gather

    This function recives a list of promiese, and then make the requiered tasks using a buffer defined by an
    environment variable.

    Args:
        promises_array (list): A list with promises.

    Returns:
        list: A list with the promiese solution.
    """

    buffer_size = int(os.environ["BUFFER_SIZE"])
    buffered_promises_array = list()
    responses = list()

    logger.debug(msg=f"starting buffer of {buffer_size} parallel tasks")

    for index in range(0, len(promises_array), buffer_size):
        buffered_promises_array.append(promises_array[index : index + buffer_size])
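The excerpt stops right after the chunks are built. A self-contained sketch of the rest of the pattern (assumed completion: await one chunk at a time so that at most buffer_size tasks run concurrently):

from asyncio import gather


async def buffered_gather_sketch(promises_array: list, buffer_size: int) -> list:
    """Assumed completion of the buffering pattern above."""
    responses = []
    for index in range(0, len(promises_array), buffer_size):
        chunk = promises_array[index:index + buffer_size]
        responses.extend(await gather(*chunk))
    return responses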
Example #30
# evaluation metrics
metric = SegmentationMetric(train_set.num_class)

# declare optimizer
optimizer = torch.optim.Adam(deeplab_model.parameters(),
                             lr=learning_rate,
                             weight_decay=1e-5)

# set device
device = 'cuda'
model = deeplab_model.to(device)
# set logger
logger = setup_logger("semantic_segmentation",
                      '/home/JinK/coco/runs/logs',
                      get_rank(),
                      filename='{}_{}_train_log.txt'.format(
                          model.__class__.__name__, 'ResNet50'),
                      mode='a+')

start_time = time.time()

for ep in range(epochs):

    model.train()
    train_loss = 0

    with tqdm(total=len(train_loader.dataset)) as progress_bar:
        for i, (images, targets, _) in enumerate(train_loader):
            optimizer.zero_grad()

            images = images.to(device)
Example #31
from webgui.views import AdminIndexView, BlankView
from webgui.user import User

from comms import find_and_load_comms
from logger import setup_logger
from services import find_and_load_services
from stats import BotStats
from utils.config import load_config

CONFIG_FOLDER = dirname(dirname(abspath(__file__)))

config = load_config(CONFIG_FOLDER)
application = Flask(__name__)
bot_stats = BotStats()
log = setup_logger()

dsn = 'https://*****:*****@sentry.io/245538'
if config.get('global', {}).get('enable_sentry', True):
    sentry = Sentry(application, dsn=dsn)


def get_services(project_service_config=None):
    services = getattr(g, "_services", None)
    if services is None:
        services = g._services = find_and_load_services(config, project_service_config)
    return services


def get_comms():
    comms = getattr(g, "_comms", None)
Example #32
    net.cuda()
    save_pth = osp.join('res/cp', cp)
    net.load_state_dict(torch.load(save_pth))
    net.eval()

    to_tensor = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    with torch.no_grad():
        for image_path in os.listdir(dspth):
            img = Image.open(osp.join(dspth, image_path))
            image = img.resize((512, 512), Image.BILINEAR)
            img = to_tensor(image)
            img = torch.unsqueeze(img, 0)
            img = img.cuda()
            out = net(img)[0]
            parsing = out.squeeze(0).cpu().numpy().argmax(0)

            vis_parsing_maps(image, parsing, stride=1, save_im=True, save_path=osp.join(respth, image_path))







if __name__ == "__main__":
    setup_logger('./res')
    evaluate()
Example #33
def main():
    global best_RMSE

    lw = utils_func.LossWise(args.api_key, args.losswise_tag, args.epochs - 1)
    # set logger
    log = logger.setup_logger(os.path.join(args.save_path, 'training.log'))
    for key, value in sorted(vars(args).items()):
        log.info(str(key) + ': ' + str(value))

    # set tensorboard
    writer = SummaryWriter(args.save_path + '/tensorboardx')

    # Data Loader
    if args.dataset == 'kitti':
        train_data, val_data = KITTILoader3D.dataloader(
            args.datapath,
            args.split_train,
            args.split_val,
            kitti2015=args.kitti2015)
        TrainImgLoader = torch.utils.data.DataLoader(
            KITTILoader_dataset3d.myImageFloder(train_data,
                                                True,
                                                kitti2015=args.kitti2015,
                                                dynamic_bs=args.dynamic_bs),
            batch_size=args.btrain,
            shuffle=True,
            num_workers=16,
            drop_last=False,
            pin_memory=True)
        TestImgLoader = torch.utils.data.DataLoader(
            KITTILoader_dataset3d.myImageFloder(val_data,
                                                False,
                                                kitti2015=args.kitti2015,
                                                dynamic_bs=args.dynamic_bs),
            batch_size=args.bval,
            shuffle=False,
            num_workers=16,
            drop_last=False,
            pin_memory=True)
    else:
        train_data, val_data = listflowfile.dataloader(args.datapath)
        TrainImgLoader = torch.utils.data.DataLoader(
            SceneFlowLoader.myImageFloder(train_data,
                                          True,
                                          calib=args.calib_value),
            batch_size=args.btrain,
            shuffle=True,
            num_workers=8,
            drop_last=False)
        TestImgLoader = torch.utils.data.DataLoader(
            SceneFlowLoader.myImageFloder(val_data,
                                          False,
                                          calib=args.calib_value),
            batch_size=args.bval,
            shuffle=False,
            num_workers=8,
            drop_last=False)

    # Load Model
    if args.data_type == 'disparity':
        model = disp_models.__dict__[args.arch](maxdisp=args.maxdisp)
    elif args.data_type == 'depth':
        model = models.__dict__[args.arch](maxdepth=args.maxdepth,
                                           maxdisp=args.maxdisp,
                                           down=args.down)
    else:
        log.info('Model is not implemented')
        assert False

    # Number of parameters
    log.info('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    model = nn.DataParallel(model).cuda()
    torch.backends.cudnn.benchmark = True

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
    scheduler = MultiStepLR(optimizer,
                            milestones=args.lr_stepsize,
                            gamma=args.lr_gamma)

    if args.pretrain:
        if os.path.isfile(args.pretrain):
            log.info("=> loading pretrain '{}'".format(args.pretrain))
            checkpoint = torch.load(args.pretrain)
            model.load_state_dict(checkpoint['state_dict'])
        else:
            log.info('[Attention]: Do not find checkpoint {}'.format(
                args.pretrain))

    if args.resume:
        if os.path.isfile(args.resume):
            log.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            args.start_epoch = checkpoint['epoch']
            optimizer.load_state_dict(checkpoint['optimizer'])
            best_RMSE = checkpoint['best_RMSE']
            scheduler.load_state_dict(checkpoint['scheduler'])
            log.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            log.info('[Attention]: Do not find checkpoint {}'.format(
                args.resume))

    # evaluation
    if args.evaluate:
        evaluate_metric = utils_func.Metric()
        ## evaluation ##
        for batch_idx, (imgL_crop, imgR_crop, disp_crop_L,
                        calib) in enumerate(TestImgLoader):
            start_time = time.time()
            test(imgL_crop, imgR_crop, disp_crop_L, calib, evaluate_metric,
                 optimizer, model)

            log.info(
                evaluate_metric.print(batch_idx, 'EVALUATE') +
                ' Time:{:.3f}'.format(time.time() - start_time))
        import sys
        sys.exit()

    for epoch in range(args.start_epoch, args.epochs):
        scheduler.step()

        ## training ##
        train_metric = utils_func.Metric()
        tqdm_train_loader = tqdm(TrainImgLoader, total=len(TrainImgLoader))
        for batch_idx, (imgL_crop, imgR_crop, disp_crop_L,
                        calib) in enumerate(tqdm_train_loader):
            # start_time = time.time()
            train(imgL_crop, imgR_crop, disp_crop_L, calib, train_metric,
                  optimizer, model)
            # log.info(train_metric.print(batch_idx, 'TRAIN') + ' Time:{:.3f}'.format(time.time() - start_time))
        log.info(train_metric.print(0, 'TRAIN Epoch' + str(epoch)))
        train_metric.tensorboard(writer, epoch, token='TRAIN')
        lw.update(train_metric.get_info(), epoch, 'Train')

        ## testing ##
        is_best = False
        if epoch == 0 or ((epoch + 1) % args.eval_interval) == 0:
            test_metric = utils_func.Metric()
            tqdm_test_loader = tqdm(TestImgLoader, total=len(TestImgLoader))
            for batch_idx, (imgL_crop, imgR_crop, disp_crop_L,
                            calib) in enumerate(tqdm_test_loader):
                # start_time = time.time()
                test(imgL_crop, imgR_crop, disp_crop_L, calib, test_metric,
                     optimizer, model)
                # log.info(test_metric.print(batch_idx, 'TEST') + ' Time:{:.3f}'.format(time.time() - start_time))
            log.info(test_metric.print(0, 'TEST Epoch' + str(epoch)))
            test_metric.tensorboard(writer, epoch, token='TEST')
            lw.update(test_metric.get_info(), epoch, 'Test')

            # SAVE
            is_best = test_metric.RMSELIs.avg < best_RMSE
            best_RMSE = min(test_metric.RMSELIs.avg, best_RMSE)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_RMSE': best_RMSE,
                'scheduler': scheduler.state_dict(),
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            epoch,
            folder=args.save_path)
    lw.done()
Example #34
# -*- coding: utf-8 -*-
import time

import quickfix as fix
import quickfix44 as fix44

import argparse
import logging
from logger import setup_logger

setup_logger('FIXS', 'Logs/executor_message.log')
log = logging.getLogger('FIXS')

# const variable
__SOH__ = chr(1)


class Application(fix.Application):
    """FIX Acceptor Application"""
    orderID = 1
    execID = 1

    def __init__(self):
        super(Application, self).__init__()

    def genOrderID(self):
        Application.orderID += 1
        return repr(Application.orderID)

    def genExecID(self):
        Application.execID += 1
Example #35
File: main.py  Project: albertclass/XStudio
	elif opt == "--gui":
		option.gui = True
	else:
		print( "opt = %s, arg = %s not found" % (opt, arg) )

if option.gui:
	app.show()

# The module name must be configured
if option.import_name is None:
	print( "module_name not special." )
	print( __doc__ )
	sys.exit(-1)

# Configure logging and create the log file
setup_logger()

# Register global functions
__builtins__['assign'] = restrict.assign
__builtins__['invoke'] = restrict.invoke
__builtins__['var'] = restrict.var

# sys.meta_path.insert(0, MetaPathFinder())
# Dynamically load the test module
robot = __import__( option.import_name )

command("tips 'start test username=%s, password=%s, module=%s, address=%s:%d'" % (
	option.username,
	option.password,
	option.import_name,
	option.host,
Example #36
def train(params):
    # Logger Setup and OS Configuration
    logger = setup_logger("SphericalGMMNet")
    logger.info("Loading Data")

    # Load Data
    train_iterator = utils.load_data_h5(params['train_dir'],
                                        batch_size=params['batch_size'])
    test_iterator = utils.load_data_h5(params['test_dir'],
                                       batch_size=params['batch_size'],
                                       rotate=True,
                                       batch=False)

    # Model Setup
    logger.info("Model Setting Up")
    model = SphericalGMMNet(params).cuda()
    model = model.cuda()

    # Model Configuration Setup
    optim = torch.optim.Adam(model.parameters(), lr=params['baselr'])
    cls_criterion = torch.nn.CrossEntropyLoss().cuda()

    # Resume If Asked
    date_time = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    if params['resume_training']:
        date_time = params['resume_training']
        model_path = os.path.join(
            params['save_dir'],
            '{date_time}-model.ckpt'.format(date_time=date_time))
        model.load_state_dict(
            torch.load(model_path, map_location=lambda storage, loc: storage))

    # Display Parameters
    for name, value in params.items():
        logger.info("{name} : [{value}]".format(name=name, value=value))

    # Generate the grids
    # [(radius, tensor([2b, 2b, 3])) * 3]
    s2_grids = utils.get_grids(b=params['bandwidth_0'],
                               num_grids=params['num_grids'],
                               base_radius=params['base_radius'])

    # TODO [Visualize Grids]
    if params['visualize']:
        utils.visualize_grids(s2_grids)

    # Keep track of max Accuracy during training
    acc_nr, max_acc_nr = 0, 0
    acc_r, max_acc_r = 0, 0

    # Iterate by Epoch
    logger.info("Start Training")
    for epoch in range(params['num_epochs']):

        if params['save_model']:
            # Save the model for each step
            if acc_nr > max_acc_nr:
                max_acc_nr = acc_nr
                save_path = os.path.join(
                    params['save_dir'],
                    '{date_time}-[NR]-[{acc}]-model.ckpt'.format(
                        date_time=date_time, acc=acc_nr))
                torch.save(model.state_dict(), save_path)
                logger.info(
                    'Saved model checkpoints into {}...'.format(save_path))
            if acc_r > max_acc_r:
                max_acc_r = acc_r
                save_path = os.path.join(
                    params['save_dir'],
                    '{date_time}-[R]-[{acc}]-model.ckpt'.format(
                        date_time=date_time, acc=acc_r))
                torch.save(model.state_dict(), save_path)
                logger.info(
                    'Saved model checkpoints into {}...'.format(save_path))

        running_loss = []
        for batch_idx, (inputs, labels) in enumerate(train_iterator):
            """ Variable Setup """
            inputs, labels = Variable(inputs).cuda(), Variable(labels).cuda()
            B, N, D = inputs.size()

            if inputs.shape[-1] == 2:
                zero_padding = torch.zeros((B, N, 1),
                                           dtype=inputs.dtype).cuda()
                inputs = torch.cat((inputs, zero_padding), -1)  # [B, N, 3]

            # Data Mapping
            inputs = utils.data_mapping(
                inputs, base_radius=params['base_radius'])  # [B, N, 3]

            if params['visualize']:

                # TODO [Visualization [Raw]]
                origins = inputs.clone()
                utils.visualize_raw(inputs, labels)

                # TODO [Visualization [Sphere]]
                print("---------- Static ------------")
                params['use_static_sigma'] = True
                inputs1 = utils.data_translation(inputs, s2_grids, params)
                utils.visualize_sphere(origins,
                                       inputs1,
                                       labels,
                                       s2_grids,
                                       params,
                                       folder='sphere')

                print("\n---------- Covariance ------------")
                params['use_static_sigma'] = False
                params['sigma_layer_diff'] = False
                inputs2 = utils.data_translation(inputs, s2_grids, params)
                utils.visualize_sphere(origins,
                                       inputs2,
                                       labels,
                                       s2_grids,
                                       params,
                                       folder='sphere')

                print("\n---------- Layer Diff ------------")
                params['use_static_sigma'] = False
                params['sigma_layer_diff'] = True
                inputs3 = utils.data_translation(inputs, s2_grids, params)
                utils.visualize_sphere(origins,
                                       inputs3,
                                       labels,
                                       s2_grids,
                                       params,
                                       folder='other')
                return
            else:
                # Data Translation
                inputs = utils.data_translation(
                    inputs, s2_grids, params
                )  # [B, N, 3] -> list( Tensor([B, 2b, 2b]) * num_grids )
            """ Run Model """
            outputs = model(inputs)
            """ Back Propagation """
            loss = cls_criterion(outputs, labels.squeeze())
            loss.backward(retain_graph=True)
            optim.step()
            running_loss.append(loss.item())

            # Update Loss Per Batch
            logger.info(
                "Batch: [{batch}/{total_batch}] Epoch: [{epoch}] Loss: [{loss}]"
                .format(batch=batch_idx,
                        total_batch=len(train_iterator),
                        epoch=epoch,
                        loss=np.mean(running_loss)))

        acc_nr = eval(test_iterator, model, params, logger, rotate=False)
        logger.info(
            "**************** Epoch: [{epoch}/{total_epoch}] [NR] Accuracy: [{acc}] ****************\n"
            .format(epoch=epoch,
                    total_epoch=params['num_epochs'],
                    loss=np.mean(running_loss),
                    acc=acc_nr))
        acc_r = eval(test_iterator, model, params, logger, rotate=True)
        logger.info(
            "**************** Epoch: [{epoch}/{total_epoch}] [R] Accuracy: [{acc}] ****************\n"
            .format(epoch=epoch,
                    total_epoch=params['num_epochs'],
                    loss=np.mean(running_loss),
                    acc=acc_r))

    logger.info('Finished Training')