Example #1
    def __init__(self, debug_dir, debug_level, debug_name):
        self.rtree = radix.Radix()

        # Logger for the pipeline
        logger.setup_logger('fwtable', debug_dir + '/' + debug_name,
                            debug_level)
        self.log = logging.getLogger('fwtable')
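Note: most of the examples in this list assume a small helper with the signature logger.setup_logger(name, log_file, level=...) that attaches a file handler to a named logger, which the caller then fetches with logging.getLogger(name). The helper itself is not shown in the excerpts; the sketch below is only a plausible reconstruction from the call sites, and the handler and format choices are assumptions.

import logging

def setup_logger(name, log_file, level=logging.INFO):
    # Hypothetical sketch: create and configure a named logger that writes to log_file.
    log = logging.getLogger(name)
    log.setLevel(level)
    handler = logging.FileHandler(log_file)
    handler.setFormatter(
        logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s'))
    log.addHandler(handler)
    return log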
Example #2
    def __init__(self, log_dir, log_level, registers, window_size=10, bin_time=0.05):

        # Logger for the throughput monitor
        self.log_dir = log_dir
        logger.setup_logger('throughput', log_dir+'/throughput.log', level=log_level)
        self.log = logging.getLogger('throughput')

        self.registers = registers
        self.window_size = window_size
        self.bin_time = bin_time
Example #3
def do_visualization(
        cfg,
        model,
        backward_model,
        dataloader,
        device,
):
    logger = setup_logger('balad-mobile.visualization', False)
    logger.info("Start visualizing")

    create_path(cfg.OUTPUT.VIS_DIR)

    for iteration, (images, targets, paths) in tqdm(enumerate(dataloader)):
        indices = targets.view(-1).nonzero().type(torch.LongTensor)
        images = images.to(device)
        targets = targets.to(device)
        predictions, activations = model(images, targets)
        backward_segmentations = backward_model(activations)

        images, steering_commands, backward_segmentations, indices = \
            images.cpu(), targets.cpu(), backward_segmentations.cpu(), indices.cpu()
        for i in indices:
            image, image_mask = images[i, :, :, :], backward_segmentations[i, :, :, :]
            path = cfg.OUTPUT.VIS_DIR + "/" + paths[i]
            worker(image, image_mask, path)
Example #4
def do_train(cfg, model, dataloader_train, dataloader_evaluation, optimizer,
             device):
    # set mode to training for model (matters for Dropout, BatchNorm, etc.)
    model.train()

    # get the trainer logger and visdom
    visdom = VisdomLogger(cfg.LOG.PLOT.DISPLAY_PORT)
    visdom.register_keys(['loss'])
    logger = setup_logger('balad-mobile.train', False)
    logger.info("Start training")

    output_dir = os.path.join(
        cfg.LOG.PATH,
        'run_{}'.format(datetime.now().strftime("%Y-%m-%d_%H:%M:%S")))
    os.makedirs(output_dir)

    # start the training loop
    for epoch in range(cfg.SOLVER.EPOCHS):
        for iteration, (images, steering_commands,
                        _) in enumerate(dataloader_train):
            images = images.to(device)
            steering_commands = steering_commands.to(device)

            predictions, loss = model(images, steering_commands)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if iteration % cfg.LOG.PERIOD == 0:
                visdom.update({'loss': [loss.item()]})
                logger.info("LOSS: \t{}".format(loss))

            if iteration % cfg.LOG.PLOT.ITER_PERIOD == 0:
                visdom.do_plotting()

            step = epoch * len(dataloader_train) + iteration
            if step % cfg.LOG.WEIGHTS_SAVE_PERIOD == 0 and iteration:
                torch.save(
                    model.state_dict(),
                    os.path.join(output_dir,
                                 'weights_{}.pth'.format(str(step))))
                do_evaluation(cfg, model, dataloader_evaluation, device)

    torch.save(model.state_dict(), os.path.join(output_dir,
                                                'weights_final.pth'))
Example #5
def main():
    parser = argparse.ArgumentParser(description="PyTorch Self-driving Car Training and Inference.")
    parser.add_argument(
        "--config-file",
        default="",
        metavar="file",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "--mode",
        default="test",
        metavar="mode",
        help="'train' or 'test'",
        type=str,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    # build the config
    cfg = get_cfg_defaults()
    # cfg.merge_from_file(args.config_file)
    # cfg.merge_from_list(args.opts)
    cfg.freeze()

    # setup the logger
    if not os.path.isdir(cfg.OUTPUT.DIR):
        os.mkdir(cfg.OUTPUT.DIR)
    logger = setup_logger("balad-mobile.train", cfg.OUTPUT.DIR,
                          '{0:%Y-%m-%d %H:%M:%S}_log'.format(datetime.now()))
    logger.info(args)
    logger.info("Running with config:\n{}".format(cfg))

    # TRAIN
    train(cfg)

    # Visualize
    visualization(cfg)
Example #6
    def make_logging(self):
        # Logger for the pipeline
        logger.setup_logger('p4_to_controller', self.log_dir+'/p4_to_controller_'+ \
            str(self.sw_name)+'.log', level=logging.INFO)
        self.log = logging.getLogger('p4_to_controller')

        # Logger for the sliding window
        logger.setup_logger('p4_to_controller_sw', self.log_dir+'/p4_to_controller_'+ \
            str(self.sw_name)+'_sw.log', level=logging.INFO)
        self.log_sw = logging.getLogger('p4_to_controller_sw')

        # Logger for the rerouting
        logger.setup_logger('p4_to_controller_rerouting', self.log_dir+'/p4_to_controller_'+ \
            str(self.sw_name)+'_rerouting.log', level=logging.INFO)
        self.log_rerouting = logging.getLogger('p4_to_controller_rerouting')

        # Logger for the Flow Selector
        logger.setup_logger('p4_to_controller_fs', self.log_dir+'/p4_to_controller_'+ \
            str(self.sw_name)+'_fs.log', level=logging.INFO)
        self.log_fs = logging.getLogger('p4_to_controller_fs')
Example #7
def do_evaluation(
        cfg,
        model,
        dataloader,
        device,
        verbose=False,
):
    logger = setup_logger('balad-mobile.evaluation', False)
    logger.info("Start evaluating")

    loss_records = []
    for iteration, (images, steering_commands, _) in tqdm(enumerate(dataloader)):
        images = images.to(device)
        steering_commands = steering_commands.to(device)
        predictions, loss = model(images, steering_commands)
        loss_records.append(loss.item())
        if verbose:
            logger.info("LOSS: \t{}".format(loss))

    logger.info('LOSS EVALUATION: {}'.format(np.mean(loss_records)))
Example #8
File: example.py  Project: k-128/cxwrap
def main():
    setup_logger()
    logger = logging.getLogger()

    def cmc_examples():
        with open("bin/keys/cmc_k.txt", "r") as f:
            cmc_key = f.read()

        cmc = CryptoWrapper(api="CMC",
                            api_key=cmc_key,
                            cache_expire=240,
                            max_retries=1)
        cmc_wrapper = cmc.wrapper

        # Get list of available endpoints (functions)
        # logger.info(cmc_wrapper.__getfunctions__())

        resp = cmc_wrapper.global_aggregate_metrics_latest_GET()
        logger.info(resp["data"]["quote"]["USD"]["total_market_cap"])
        resp = cmc_wrapper.cryptocurrency_info_GET(symbol="BTC")
        logger.info(resp["data"]["BTC"]["date_added"])

    def cryptocompare_examples():
        with open("bin/keys/cryptocompare_k.txt", "r") as f:
            cryptocompare_key = f.read()

        cryptocompare = CryptoWrapper(api="CryptoCompare",
                                      api_key=cryptocompare_key)
        cryptocompare_wrapper = cryptocompare.wrapper

        # Get list of available endpoints (functions)
        # logger.info(cryptocompare_wrapper.__getfunctions__())

        resp = cryptocompare_wrapper.price_GET(fsym="BTC", tsyms="USD,JPY,EUR")
        logger.info(resp)
        resp = cryptocompare_wrapper.historical_daily_ohlcv_GET(fsym="BTC",
                                                                tsym="USD",
                                                                limit=1)
        logger.info(resp)

    def bitmex_examples():
        # with open("bin/keys/bitmex_k.txt", "r") as f:
        #     bitmex_key = f.read()
        # with open("bin/keys/bitmex_s.txt", "r") as f:
        #     bitmex_secret = f.read()

        bitmex = CryptoWrapper(
            api="BitMEX",
            # api_key=bitmex_key,
            # api_secret=bitmex_secret,
            cache_expire=0,
            max_retries=2)
        bitmex_wrapper = bitmex.wrapper

        # To alternate between mainnet & testnet:
        bitmex_wrapper.BASE_URL = "https://www.bitmex.com/api/v1"

        # bitmex_wrapper.BASE_URL = "https://testnet.bitmex.com/api/v1"

        def example_1():
            resp = bitmex_wrapper.announcement_GET(columns=["title", "date"])
            logger.info(resp[0])

        def example_2():
            resp = bitmex_wrapper.order_bulk_POST(orders=json.dumps([
                {"symbol": "XBTUSD", "orderQty": 250, "price": 1000},
                {"symbol": "XBTUSD", "orderQty": 500, "price": 2500},
            ]))
            logger.info(resp)

        def example_3():
            resp = bitmex_wrapper.chat_GET(count=2)
            logger.info(resp[0]["date"])

        # Get list of available endpoints (functions)
        # logger.info(bitmex_wrapper.__getfunctions__())

        example_1()
        # example_2()
        # example_3()

    def binance_examples():
        # with open("bin/keys/binance_k.txt", "r") as f:
        #     binance_key = f.read()
        # with open("bin/keys/binance_s.txt", "r") as f:
        #     binance_secret = f.read()

        binance = CryptoWrapper(
            api="Binance",
            # api_key=binance_key,
            # api_secret=binance_secret,
            max_retries=2)
        binance_wrapper = binance.wrapper

        def example_1():
            resp = binance_wrapper.exchange_information_GET()
            logger.info(resp)

        def example_2():
            resp = binance_wrapper.order_test_POST(
                symbol="LTCBTC",
                side="BUY",
                type="LIMIT",
                timeInForce="GTC",
                quantity=10,
                price=0.009,
                recvWindow=5000,
                timestamp=int(time() * 1000 - 2000))
            logger.info(resp)

        def example_3():
            resp = binance_wrapper.user_wallet_deposit_address_GET(
                asset="BTC",
                recvWindow=5000,
                timestamp=int(time() * 1000 - 2000))
            logger.info(resp)

        # Get list of available endpoints (functions)
        # logger.info(binance_wrapper.__getfunctions__())

        example_1()
        # example_2()
        # example_3()

    def binance_dex_examples():
        binance_dex = CryptoWrapper(api="BinanceDEX", max_retries=2)
        binance_dex_wrapper = binance_dex.wrapper

        # Get list of available endpoints (functions)
        # logger.info(binance_dex_wrapper.__getfunctions__())

        resp = binance_dex_wrapper.fees_GET()
        logger.info(resp)
        resp = binance_dex_wrapper.tokens_GET()
        logger.info(resp)

    def bitfinex_examples():
        bitfinex = CryptoWrapper(api="Bitfinex")
        bitfinex_wrapper = bitfinex.wrapper

        # Get list of available endpoints (functions)
        # logger.info(bitfinex_wrapper.__getfunctions__())

        resp = bitfinex_wrapper.platform_status_GET()
        logger.info(resp)
        resp = bitfinex_wrapper.tickers_GET(symbols="tBTCUSD,tLTCUSD,fUSD")
        logger.info(resp)

    def deribit_examples():
        # with open("bin/keys/deribit_k.txt", "r") as f:
        #     deribit_key = f.read()
        # with open("bin/keys/deribit_s.txt", "r") as f:
        #     deribit_secret = f.read()

        deribit = CryptoWrapper(
            api="Deribit",
            # api_key=deribit_key,
            # api_secret=deribit_secret,
            cache_expire=0,
            max_retries=2)
        deribit_wrapper = deribit.wrapper

        # To alternate between mainnet & testnet:
        deribit_wrapper.BASE_URL = "https://www.deribit.com/api/v2"

        # deribit_wrapper.BASE_URL = "https://test.deribit.com/api/v2"

        def example_1():
            resp = deribit_wrapper.get_time_GET()
            logger.info(resp)

        def example_2():
            resp = deribit_wrapper.get_contract_size_GET(
                instrument_name="BTC-PERPETUAL")
            logger.info(resp)

        def example_3():
            resp = deribit_wrapper.order_buy_GET(
                instrument_name="BTC-PERPETUAL",
                amount=500,
                type="limit",
                label="test",
                price=1000)
            logger.info(resp)

        # Get list of available endpoints (functions)
        # logger.info(deribit_wrapper.__getfunctions__())

        example_1()
        # example_2()
        # example_3()

    try:
        cmc_examples()
        # cryptocompare_examples()
        # bitmex_examples()
        # binance_examples()
        # binance_dex_examples()
        # bitfinex_examples()
        # deribit_examples()

    except Exception as e:
        logger.info(f"Exception: {e}")
Example #9
from util.discord.channel import ChannelUtil
from util.env import Env
from util.logger import setup_logger
from version import __version__

import asyncio
import discord
import logging
from rpc.client import RPCClient
from tasks.transaction_queue import TransactionQueue

# Configuration
config = Config.instance()

# Setup logger
setup_logger(config.log_file,
             log_level=logging.DEBUG if config.debug else logging.INFO)
logger = logging.getLogger()

client = Bot(command_prefix=config.command_prefix)
client.remove_command('help')

### Bot events


@client.event
async def on_ready():
    logger.info(f"Starting Graham v{__version__}")
    logger.info(f"Discord.py version {discord.__version__}")
    logger.info(f"Bot name: {client.user.name}")
    logger.info(f"Bot Discord ID: {client.user.id}")
    await client.change_presence(activity=discord.Game(config.playing_status))
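Examples #9, #14, #16-#20 and #22-#23 use a different flavour: setup_logger is called once with just a log file (and optionally a level), and the code then grabs loggers via logging.getLogger(__name__) or logging.getLogger(), so the helper presumably configures the root logger. Below is a minimal sketch under that assumption; the format string and the extra console handler are guesses, not part of the excerpts.

import logging

def setup_logger(log_file, log_level=logging.INFO):
    # Hypothetical sketch: configure the root logger once so that every
    # logging.getLogger(...) call in the program inherits these handlers.
    logging.basicConfig(
        level=log_level,
        format='%(asctime)s [%(name)s] %(levelname)s: %(message)s',
        handlers=[logging.FileHandler(log_file), logging.StreamHandler()])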
Example #10
                    required=True)
parser.add_argument('--threshold',
                    type=int,
                    default=31,
                    help='Threshold used to decide when to fast reroute')

args = parser.parse_args()
port = args.port
log_dir = args.log_dir
log_level = args.log_level
topo_db = args.topo_db
routing_file = args.routing_file
threshold = args.threshold

# Logger for the controller
logger.setup_logger('controller', log_dir + '/controller.log', level=log_level)
log = logging.getLogger('controller')

log.info(str(port)+'\t'+str(log_dir)+'\t'+str(log_level)+'\t'+str(routing_file)+ \
'\t'+str(threshold))

# Read the topology
topo = Topology(db=topo_db)

mapping_dic = {}
tmp = list(topo.get_hosts()) + list(topo.get_p4switches())
mapping_dic = {k: v for v, k in enumerate(tmp)}
log.info(str(mapping_dic))
"""
    This function adds an entry in a match+action table of the switch
"""
Example #11
    def __init__(self, port, log_dir, log_level, window_size, nbprefixes, \
    nbflows_prefix, eviction_timeout, seed):

        # Logger for the pipeline
        logger.setup_logger('pipeline',
                            log_dir + '/pipeline.log',
                            level=log_level)
        self.log = logging.getLogger('pipeline')

        self.log.log(20, str(port)+'\t'+str(log_dir)+'\t'+str(log_level)+'\t'+ \
        str(window_size)+'\t'+str(nbprefixes)+'\t'+str(nbflows_prefix)+'\t'+ \
        str(eviction_timeout)+'\t'+str(seed))

        self.ip_controller = 'localhost'
        self.port_controller = port
        self.seed = seed

        # Dictionary with all the forwarding tables
        self.fwtables = {}
        self.fwtables['meta_fwtable'] = FWTable(log_dir, log_level,
                                                'meta_fwtable.log')

        # Dictionary with all the register arrays
        self.registers = {}

        self.registers['flowselector_key'] = [0] * (
            nbflows_prefix * nbprefixes
        )  # *100 to make sure there is no overflow
        self.registers['flowselector_ts'] = [0] * (nbflows_prefix * nbprefixes)
        self.registers['flowselector_nep'] = [0] * (
            nbflows_prefix * nbprefixes)  # nep for Next Expected Packet
        self.registers['flowselector_last_ret'] = [0] * (
            nbflows_prefix * nbprefixes)  # Timestamp
        self.registers['flowselector_5tuple'] = [''] * (
            nbflows_prefix * nbprefixes)  # Just used in the python implem

        # Registers used for the sliding window
        self.registers['sw'] = []
        for _ in xrange(0, nbprefixes):
            self.registers['sw'] += [0] * window_size
        self.registers['sw_index'] = [0] * nbprefixes
        self.registers['sw_time'] = [0] * nbprefixes
        self.registers['sw_sum'] = [0] * nbprefixes

        # Registers used for the throughput sliding window
        self.registers['sw_throughput'] = []
        for _ in xrange(0, nbprefixes):
            self.registers['sw_throughput'] += [0] * window_size
        self.registers['sw_index_throughput'] = [0] * nbprefixes
        self.registers['sw_time_throughput'] = [0] * nbprefixes
        self.registers['sw_sum1_throughput'] = [0] * nbprefixes
        self.registers['sw_sum2_throughput'] = [0] * nbprefixes

        self.registers['threshold_registers'] = [50] * nbprefixes

        self.registers['next_hops_index'] = [0] * nbprefixes
        self.registers['next_hops_port'] = [2, 3, 4] * nbprefixes

        # This is the FlowSelector, used to keep track of a defined number of
        # active flows per prefix
        self.flowselector = FlowSelector(log_dir, 20, self.registers, 32, \
            nbflows_prefix, eviction_timeout, self.seed)

        self.throughput = ThroughputMonitor(log_dir, 20, self.registers)

        # Socket used to communicate with the controller
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_address = (self.ip_controller, self.port_controller)
        while True:
            status = self.sock.connect_ex(server_address)
            if status == 0:
                print 'Connected!'
                break
            else:
                print 'Could not connect, retry in 2 seconds..'
                time.sleep(2)

        fcntl.fcntl(self.sock, fcntl.F_SETFL, os.O_NONBLOCK)

        self.data = ''

        # Read flow table rules from the controller until the table is fully populated
        self.ready = False
        while not self.ready:
            self.read_controller()
Example #12
"""Module to be run first time to set up the database
* Drops all tables if the exist and creates them again.
* Populates responses from Gamepedia
* Populates heroes from Gamepedia and Dota 2 subreddit CSS.
"""
from parsers import css_parser, wiki_parser
from util.database.database import db_api

__author__ = 'MePsyDuck'

from util.logger import setup_logger


def first_run():
    db_api.drop_all_tables()
    db_api.create_all_tables()
    wiki_parser.populate_responses()
    css_parser.populate_heroes()


if __name__ == '__main__':
    setup_logger()
    first_run()
Example #13
                    type=str,
                    default='log',
                    help='Log Directory',
                    required=False)
args = parser.parse_args()
dst_ip = args.dst_ip
src_ports = args.src_ports
dst_ports = args.dst_ports
ipd = args.ipd
duration = args.duration
log_dir = args.log_dir

process_list = []

logger.setup_logger('traffic_generation',
                    log_dir + '/traffic_generation.log',
                    level=logging.INFO)
log = logging.getLogger('traffic_generation')

for src_port, dst_port in zip(range(int(src_ports.split(',')[0]), int(src_ports.split(',')[1])), \
    range(int(dst_ports.split(',')[0]), int(dst_ports.split(',')[1]))):

    flow_template = {
        "dst": dst_ip,
        "dport": dst_port,
        "sport": src_port,
        "ipd": ipd,
        "duration": duration
    }

    process = multiprocessing.Process(target=sendFlowTCP, kwargs=flow_template)
Example #14
from weixin.weixin import server
from util.logger import setup_logger

import logging

setup_logger('weixin.log')
_logger = logging.getLogger(__name__)

while True:
    try:
        server()
    except Exception as e:
        _logger.exception(e)
Example #15
def main(args):
    utils.init_distributed_mode(args)
    logger = setup_logger(output=args.output_dir,
                          distributed_rank=dist.get_rank(),
                          name="DETR",
                          phase="train" if not args.eval else "eval")
    logger.info(args)
    # logger.info("git:\n  {}\n".format(utils.get_sha()))

    if args.frozen_weights is not None:
        assert args.masks, "Frozen training is meant for segmentation only"

    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    model, criterion, postprocessors = build_model(args)
    logger.info(model)
    model.to(device)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu])
        model_without_ddp = model.module
    n_parameters = sum(p.numel() for p in model.parameters()
                       if p.requires_grad)
    logger.info('number of params: {}'.format(n_parameters))

    param_dicts = [
        {
            "params": [
                p for n, p in model_without_ddp.named_parameters()
                if "backbone" not in n and p.requires_grad
            ]
        },
        {
            "params": [
                p for n, p in model_without_ddp.named_parameters()
                if "backbone" in n and p.requires_grad
            ],
            "lr":
            args.lr_backbone,
        },
    ]
    optimizer = torch.optim.AdamW(param_dicts,
                                  lr=args.lr,
                                  weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)

    dataset_train = build_dataset(image_set='train', args=args)
    dataset_val = build_dataset(image_set='val', args=args)

    if args.distributed:
        sampler_train = DistributedSampler(dataset_train)
        sampler_val = DistributedSampler(dataset_val, shuffle=False)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)

    batch_sampler_train = torch.utils.data.BatchSampler(sampler_train,
                                                        args.batch_size,
                                                        drop_last=True)

    data_loader_train = DataLoader(dataset_train,
                                   batch_sampler=batch_sampler_train,
                                   collate_fn=utils.collate_fn,
                                   num_workers=args.num_workers)
    data_loader_val = DataLoader(dataset_val,
                                 args.batch_size,
                                 sampler=sampler_val,
                                 drop_last=False,
                                 collate_fn=utils.collate_fn,
                                 num_workers=args.num_workers)

    if args.dataset_file == "coco_panoptic":
        # We also evaluate AP during panoptic training, on original coco DS
        coco_val = datasets.coco.build("val", args)
        base_ds = get_coco_api_from_dataset(coco_val)
    else:
        base_ds = get_coco_api_from_dataset(dataset_val)

    if args.frozen_weights is not None:
        checkpoint = torch.load(args.frozen_weights, map_location='cpu')
        model_without_ddp.detr.load_state_dict(checkpoint['model'])

    output_dir = Path(args.output_dir)
    if args.resume:
        if args.resume == 'auto':
            ckpt = os.path.join(args.output_dir, 'checkpoint.pth')
            if os.path.isfile(ckpt):
                checkpoint = torch.load(os.path.join(args.output_dir,
                                                     'checkpoint.pth'),
                                        map_location='cpu')
                msg = model_without_ddp.load_state_dict(checkpoint['model'],
                                                        strict=False)
                logger.info('Missing keys: {}'.format(msg.missing_keys))
                if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
                    optimizer.load_state_dict(checkpoint['optimizer'])
                    lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
                    args.start_epoch = checkpoint['epoch'] + 1
        else:
            if args.resume.startswith('https'):
                checkpoint = torch.hub.load_state_dict_from_url(
                    args.resume, map_location='cpu', check_hash=True)
            else:
                checkpoint = torch.load(args.resume, map_location='cpu')
            model_without_ddp.load_state_dict(checkpoint['model'])
            if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
                optimizer.load_state_dict(checkpoint['optimizer'])
                lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
                args.start_epoch = checkpoint['epoch'] + 1

    if args.eval:
        test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,
                                              data_loader_val, base_ds, device,
                                              args.output_dir)
        if args.output_dir:
            utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval,
                                 output_dir / "eval.pth")
        return

    logger.info("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            sampler_train.set_epoch(epoch)
        train_stats = train_one_epoch(model, criterion, data_loader_train,
                                      optimizer, device, epoch,
                                      args.clip_max_norm)
        lr_scheduler.step()
        if args.output_dir:
            checkpoint_paths = [output_dir / 'checkpoint.pth']
            # extra checkpoint before LR drop and every 100 epochs
            if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0:
                checkpoint_paths.append(output_dir /
                                        f'checkpoint{epoch:04}.pth')
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master(
                    {
                        'model': model_without_ddp.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'lr_scheduler': lr_scheduler.state_dict(),
                        'epoch': epoch,
                        'args': args,
                    }, checkpoint_path)

        if (epoch + 1) % args.eval_freq == 0:
            test_stats, coco_evaluator = evaluate(model, criterion,
                                                  postprocessors,
                                                  data_loader_val, base_ds,
                                                  device, args.output_dir)

            log_stats = {
                **{f'train_{k}': v for k, v in train_stats.items()},
                **{f'test_{k}': v for k, v in test_stats.items()},
                'epoch': epoch,
                'n_parameters': n_parameters,
            }

            if args.output_dir and utils.is_main_process():
                with (output_dir / "log.txt").open("a") as f:
                    f.write(json.dumps(log_stats) + "\n")

                # for evaluation logs
                if coco_evaluator is not None:
                    (output_dir / 'eval').mkdir(exist_ok=True)
                    if "bbox" in coco_evaluator.coco_eval:
                        filenames = ['latest.pth']
                        if epoch % 50 == 0:
                            filenames.append(f'{epoch:03}.pth')
                        for name in filenames:
                            torch.save(coco_evaluator.coco_eval["bbox"].eval,
                                       output_dir / "eval" / name)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    logger.info('Training time {}'.format(total_time_str))
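Example #15 uses yet another variant, setup_logger(output=..., distributed_rank=..., name=..., phase=...), which returns the logger and is aware of distributed training. A common convention in such codebases is to let only rank 0 write to the console and to the log file; the sketch below assumes that convention and the file name output/<phase>.log, neither of which is confirmed by the excerpt.

import logging
import os
import sys

def setup_logger(output=None, distributed_rank=0, name="DETR", phase="train"):
    # Hypothetical sketch: only rank 0 emits console output and writes the file,
    # so multi-GPU runs do not duplicate every log line.
    log = logging.getLogger(name)
    log.setLevel(logging.DEBUG)
    fmt = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
    if distributed_rank == 0:
        console = logging.StreamHandler(stream=sys.stdout)
        console.setFormatter(fmt)
        log.addHandler(console)
        if output:
            os.makedirs(output, exist_ok=True)
            fh = logging.FileHandler(os.path.join(output, phase + '.log'))
            fh.setFormatter(fmt)
            log.addHandler(fh)
    return log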
Example #16
from channel.mq_client import MsgQueue
from util.constants import Constants
from util.db import DB, Collection
from util.dump_stack import dumpstacks 
from util.logger import setup_logger
from util.plot import HistoryPlot as Plot

from datetime import datetime
import json
import logging
import time

setup_logger('backtest.log')
_logger = logging.getLogger(__name__)

class SingleBackTest(object):
    def __init__(self, stock_code):
        self.__stock_code = stock_code
        self.__mq = MsgQueue()

    def start(self):
        self.__mq.start()

    def stop(self):
        self.__mq.stop()

    def test(self, start, end):
        stock_info = {'stock': self.__stock_code, 'start': start, 'end': end}
        self.__mq.send(json.dumps(stock_info))
        time.sleep(1)
        msg = self.__mq.recv()
Example #17
from util.db import DB, Collection
from util.config import Config
from util.constants import Constants
from util.file_db import FileDB
from util.logger import setup_logger
from util.dump_stack import dumpstacks
from util.util import Util

from collections import OrderedDict
from datetime import datetime, timedelta
import json
import logging
import pymongo
import time

setup_logger('strategy_engine.log')
_logger = logging.getLogger(__name__)


class StrategyEngine(object):
    DEFAULT_STRATEGY = 'macd_strategy'
    DEFAULT_BAR_PERIOD = 30

    def __init__(self):
        self.__mq_server = None
        self.__data_db = DB(Constants.HIST_DATA_DB_NAME)
        self.__config = Config()
        self.__tick_db = FileDB(
            self.__config.get_config('persistent', 'hist_tick_dir'))
        self.__trading_strategy = None
        self.__tick_collector = None
Example #18
app = Flask(__name__)
gauge_db = GaugeDB()

# Needed for single main.py file
THREADED_RUN = True

# Use port 80 for AWS EC2; default is 5000
PORT = 80

# Use 0.0.0.0 to listen on all interfaces; default is 127.0.0.1
HOST = '0.0.0.0'

DEBUG_SERVER = False

# Logger
logfix = setup_logger()

currencies = ["GBP/USD","EUR/USD","USD/CHF","USD/JPY","USD/CAD","USD/SGD","AUD/USD"]

try:
    g_data = gauge_db.get_latest_gauge()
    currjson = g_data.get("currencies")
    gauge = g_data.get("GAU")
except:
    currjson = {
        "GBP":0.0,
        "EUR":0.0,
        "CHF":0.0,
        "JPY":0.0,
        "CAD":0.0,
        "SGD":0.0,
Example #19
from util.db import DB, Collection
from util.config import Config
from util.constants import Constants
from util.file_db import FileDB
from util.logger import setup_logger
from util.dump_stack import dumpstacks 
from util.util import Util

from collections import OrderedDict
from datetime import datetime, timedelta
import json
import logging
import pymongo
import time

setup_logger('strategy_engine.log')
_logger = logging.getLogger(__name__)

class StrategyEngine(object):
    DEFAULT_STRATEGY = 'macd_strategy'
    DEFAULT_BAR_PERIOD = 30
    def __init__(self):
        self.__mq_server = None
        self.__data_db = DB(Constants.HIST_DATA_DB_NAME)
        self.__config = Config()
        self.__tick_db = FileDB(self.__config.get_config('persistent', 'hist_tick_dir'))
        self.__trading_strategy = None
        self.__tick_collector = None

    def start(self):
        Util.set_token()
Example #20
from collector.hist_data_collector import HistDataCollector
from collector.stock_basic_collector import StockBasicCollector

from util.constants import Constants
from util.db import DB, Collection
from util.logger import setup_logger

from apscheduler.schedulers.blocking import BlockingScheduler
import time

import logging

setup_logger('period_gather.log')
_logger = logging.getLogger(__name__)


class PeriodGather(object):
    def __init__(self, db):
        self.__db = db
        self.__scheduler = BlockingScheduler()
        self.__scheduler.add_job(self.gather,
                                 'cron',
                                 day_of_week='mon-fri',
                                 hour=16,
                                 minute=30)

    def start(self):
        self.__scheduler.start()

    def gather(self):
        _logger.info('period gather stock basic and history data, begin.....')
Example #21
def main():

    parser = argparse.ArgumentParser(description='Chainer example: MNIST')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=32,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=30,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--centerloss',
                        '-c',
                        action='store_true',
                        default=False,
                        help='Use center loss')
    parser.add_argument('--alpha_ratio',
                        '-a',
                        type=float,
                        default=0.5,
                        help='alpha ratio')
    parser.add_argument('--lambda_ratio',
                        '-l',
                        type=float,
                        default=0.1,
                        help='lambda ratio')
    parser.add_argument('--frequency',
                        '-f',
                        type=int,
                        default=-1,
                        help='Frequency of taking a snapshot')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    args = parser.parse_args()

    logger = setup_logger(__name__)
    logger.info("GPU: {}".format(args.gpu))
    logger.info("# Minibatch-size: {}".format(args.batchsize))
    logger.info("# epoch: {}".format(args.epoch))
    logger.info("Calculate center loss: {}".format(args.centerloss))
    if args.centerloss:
        logger.info('# alpha: {}'.format(args.alpha_ratio))
        logger.info('# lambda: {}'.format(args.lambda_ratio))

    NUM_CLASSES = 10

    model = LeNets(
        out_dim=NUM_CLASSES,
        alpha_ratio=args.alpha_ratio,
        lambda_ratio=args.lambda_ratio,
        is_center_loss=args.centerloss,
    )

    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()  # Copy the model to the GPU

    # Setup an optimizer
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    # Load the MNIST dataset
    train, test = chainer.datasets.get_mnist(ndim=3)

    train_iter = chainer.iterators.MultiprocessIterator(train,
                                                        args.batchsize,
                                                        n_processes=4)
    test_iter = chainer.iterators.MultiprocessIterator(test,
                                                       args.batchsize,
                                                       n_processes=4,
                                                       repeat=False,
                                                       shuffle=False)

    # Set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))

    # Dump a computational graph from 'loss' variable at the first iteration
    # The "main" refers to the target link of the "main" optimizer.
    trainer.extend(extensions.dump_graph('main/loss'))

    # Take a snapshot for each specified epoch
    frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport())

    # Save two plot images to the result dir
    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                  'epoch',
                                  file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(
                ['main/accuracy', 'validation/main/accuracy'],
                'epoch',
                file_name='accuracy.png'))

    # Print selected entries of the log to stdout
    # Here "main" refers to the target link of the "main" optimizer again, and
    # "validation" refers to the default name of the Evaluator extension.
    # Entries other than 'epoch' are reported by the Classifier link, called by
    # either the updater or the evaluator.
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'iteration', 'main/loss', 'validation/main/loss',
            'main/accuracy', 'validation/main/accuracy', 'elapsed_time'
        ]))

    # Visualize Deep Features
    trainer.extend(VisualizeDeepFeature(train[:10000], NUM_CLASSES,
                                        args.centerloss),
                   trigger=(1, 'epoch'))

    # Print a progress bar to stdout
    trainer.extend(extensions.ProgressBar())

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
Example #22
from channel.mq_client import MsgQueue
from util.constants import Constants
from util.db import DB, Collection
from util.dump_stack import dumpstacks
from util.logger import setup_logger
from util.plot import HistoryPlot as Plot

from datetime import datetime
import json
import logging
import time

setup_logger('backtest.log')
_logger = logging.getLogger(__name__)


class SingleBackTest(object):
    def __init__(self, stock_code):
        self.__stock_code = stock_code
        self.__mq = MsgQueue()

    def start(self):
        self.__mq.start()

    def stop(self):
        self.__mq.stop()

    def test(self, start, end):
        stock_info = {'stock': self.__stock_code, 'start': start, 'end': end}
        self.__mq.send(json.dumps(stock_info))
        time.sleep(1)
Example #23
from collector.hist_data_collector import HistDataCollector
from collector.stock_basic_collector import StockBasicCollector

from util.constants import Constants
from util.db import DB, Collection
from util.logger import setup_logger

from apscheduler.schedulers.blocking import BlockingScheduler
import time

import logging

setup_logger('period_gather.log')
_logger = logging.getLogger(__name__)

class PeriodGather(object):
    def __init__(self, db): 
        self.__db = db
        self.__scheduler = BlockingScheduler()
        self.__scheduler.add_job(self.gather, 'cron', day_of_week='mon-fri', hour=16, minute=30)

    def start(self):
        self.__scheduler.start()

    def gather(self):
        _logger.info('period gather stock basic and history data, begin.....')
        try:
            StockBasicCollector(self.__db).collect()
            stock_list = self.__get_stock_list()
            for stock in stock_list:
                HistDataCollector(stock, self.__db).collect()
Example #24
import os
from bunq.sdk import context

import util.logger as logger
from api.client import Client
from bot.bot import TelegramBot
from func.interface import BudgetApiInterface

_API_KEY = os.environ['BUNQ_BOT_API_KEY']
_BOT_TOKEN = os.environ['BUNQ_BOT_TOKEN']
_DEVICE_DESCRIPTION = 'Better Bunq Bot'
_LOG_FILENAME = 'better-bunq-bot.log'

# Change this one, once you're ready to leave the SandBox!
_ENVIRONMENT = context.ApiEnvironmentType.PRODUCTION

if __name__ == "__main__":
    logger.setup_logger(_LOG_FILENAME)
    Client.setup_api_context(_ENVIRONMENT, _API_KEY, _DEVICE_DESCRIPTION)

    budget_interface = BudgetApiInterface()
    bot = TelegramBot(_BOT_TOKEN, budget_interface)