Example n. 1
#!/usr/bin/python
import struct
import bitstruct
import time
import os
import signal
import sys
from src import l2tp, ipsec, frame
from src import log
from twisted.internet.task import LoopingCall
from twisted.internet import reactor, protocol
from src.utils import *

# initialize globals
logger = log.get_logger()


def main():
    def graceful_shutdown():
        # handle the shutdown procedure gracefully
        logger.info("Shutting down server")
        l2tp_server.shutdown()
        ipsec_server.shutdown()

    try:
        # initialize everything
        logger.info("Initiating server")
        config = load_config()
        l2tp_server = l2tp.L2TPServer(**config['L2TP'])
        ipsec_server = ipsec.IPsecServer(**config['IPsec'])
Example n. 2
from flask import Flask
from flask_socketio import SocketIO, emit
from esdl.processing.EcoreDocumentation import EcoreDocumentation
from esdl.processing.ESDLEcore import instantiate_type
from esdl.processing.ESDLDataLayer import ESDLDataLayer
from extensions.session_manager import get_session
from extensions.vue_backend.control_strategy import get_control_strategy_info, set_control_strategy
from extensions.vue_backend.cost_information import set_cost_information
from dataclasses import asdict
from extensions.vue_backend.messages.DLA_table_data_message import DLA_table_data_request, DLA_table_data_response, \
    DLA_set_table_data_request
from extensions.vue_backend.messages.DLA_delete_ref_message import DeleteRefMessage
from src.asset_draw_toolbar import AssetDrawToolbar
import src.log as log
import esdl

logger = log.get_logger(__name__)


class DataLayerAPI:
    def __init__(self, flask_app: Flask, socket: SocketIO,
                 esdl_doc: EcoreDocumentation):
        self.flask_app = flask_app
        self.socketio = socket
        self.datalayer = ESDLDataLayer(esdl_doc)
        self.register()

    def register(self):
        logger.info("Registering DataLayerAPI")

        # Implementation that could be used for the ESDL Browser
        @self.socketio.on('DLA_get_object_info', namespace='/esdl')
Example n. 3
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.optimizers import SGD

import src.config as cf
from src.data_generator import TextSequenceGenerator
from src.models import CRNN_model
from src.log import get_logger

logger = get_logger(__name__)


def train():

    train_set = TextSequenceGenerator(cf.WORDS_TRAIN,
                                      img_size=cf.IMAGE_SIZE,
                                      max_text_len=cf.MAX_LEN_TEXT,
                                      downsample_factor=cf.DOWNSAMPLE_FACTOR)
    test_set = TextSequenceGenerator(cf.WORDS_TEST,
                                     img_size=cf.IMAGE_SIZE,
                                     max_text_len=cf.MAX_LEN_TEXT,
                                     downsample_factor=cf.DOWNSAMPLE_FACTOR,
                                     shuffle=False,
                                     data_aug=False)

    n_train = len(train_set.ids)
    n_val = len(test_set.ids)
    logger.info("Train set size: %d", n_train)
    logger.info("Validation set size: %d", n_val)

    model, y_func = CRNN_model()
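    # note: CRNN_model is assumed here to return both the compiled training
    # model and a backend function used later to decode predictions (e.g. CTC)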
Example n. 4
from keras.models import load_model
import os
import sys
from keras.models import Model
from keras import backend as K
import cv2
import numpy as np
import time
import argparse
import logging
from src.io import checkdir
from src import postprocess
from src import log
import gdal

# Setup logging
logger = log.get_logger('evaluating')
log.log2file('evaluating')

parser = argparse.ArgumentParser(
    description='See the argument descriptions below for all available options')

parser.add_argument('-d',
                    '--data',
                    help='Input directory containing image and label',
                    required=True)

parser.add_argument(
    '-sg',
    '--skip_gridding',
    type=int,
    help='If gridding is already done then skip it. [Default] is No = 0',
Example n. 5
def main(args):

    if args.is_distributed == 0:
        cfg = faceqa_1p_cfg
    else:
        cfg = faceqa_8p_cfg

    cfg.data_lst = args.train_label_file
    cfg.pretrained = args.pretrained

    # Init distributed
    if args.is_distributed:
        init()
        cfg.local_rank = get_rank()
        cfg.world_size = get_group_size()
        parallel_mode = ParallelMode.DATA_PARALLEL
    else:
        parallel_mode = ParallelMode.STAND_ALONE

    # parallel_mode 'STAND_ALONE' does not support parameter_broadcast or mirror_mean
    context.set_auto_parallel_context(parallel_mode=parallel_mode,
                                      device_num=cfg.world_size,
                                      gradients_mean=True)

    mindspore.common.set_seed(1)

    # logger
    cfg.outputs_dir = os.path.join(
        cfg.ckpt_path,
        datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
    cfg.logger = get_logger(cfg.outputs_dir, cfg.local_rank)
    loss_meter = AverageMeter('loss')

    # Dataloader
    cfg.logger.info('start create dataloader')
    de_dataset = faceqa_dataset(imlist=cfg.data_lst,
                                local_rank=cfg.local_rank,
                                world_size=cfg.world_size,
                                per_batch_size=cfg.per_batch_size)
    cfg.steps_per_epoch = de_dataset.get_dataset_size()
    de_dataset = de_dataset.repeat(cfg.max_epoch)
    de_dataloader = de_dataset.create_tuple_iterator(output_numpy=True)
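    # output_numpy=True makes the iterator yield numpy arrays; they are
    # wrapped into mindspore Tensors inside the training loop below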
    # Show cfg
    cfg.logger.save_args(cfg)
    cfg.logger.info('end create dataloader')

    # backbone and loss
    cfg.logger.important_info('start create network')
    create_network_start = time.time()

    network = FaceQABackbone()
    criterion = CriterionsFaceQA()

    # load pretrain model
    if os.path.isfile(cfg.pretrained):
        param_dict = load_checkpoint(cfg.pretrained)
        param_dict_new = {}
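        # checkpoints saved by the training wrapper prefix weight names with
        # 'network.'; strip that prefix and skip optimizer 'moments.' state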
        for key, values in param_dict.items():
            if key.startswith('moments.'):
                continue
            elif key.startswith('network.'):
                param_dict_new[key[8:]] = values
            else:
                param_dict_new[key] = values
        load_param_into_net(network, param_dict_new)
        cfg.logger.info('load model {} success'.format(cfg.pretrained))

    # optimizer and lr scheduler
    lr = warmup_step(cfg, gamma=0.9)
    opt = Momentum(params=network.trainable_params(),
                   learning_rate=lr,
                   momentum=cfg.momentum,
                   weight_decay=cfg.weight_decay,
                   loss_scale=cfg.loss_scale)

    # package training process, adjust lr + forward + backward + optimizer
    train_net = BuildTrainNetwork(network, criterion)
    train_net = TrainOneStepCell(
        train_net,
        opt,
        sens=cfg.loss_scale,
    )

    # checkpoint save
    if cfg.local_rank == 0:
        ckpt_max_num = cfg.max_epoch * cfg.steps_per_epoch // cfg.ckpt_interval
        train_config = CheckpointConfig(
            save_checkpoint_steps=cfg.ckpt_interval,
            keep_checkpoint_max=ckpt_max_num)
        ckpt_cb = ModelCheckpoint(config=train_config,
                                  directory=cfg.outputs_dir,
                                  prefix='{}'.format(cfg.local_rank))
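        # drive the ModelCheckpoint callback by hand (outside Model.train)
        # through MindSpore's internal callback parameters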
        cb_params = _InternalCallbackParam()
        cb_params.train_network = train_net
        cb_params.epoch_num = ckpt_max_num
        cb_params.cur_epoch_num = 1
        run_context = RunContext(cb_params)
        ckpt_cb.begin(run_context)

    train_net.set_train()
    t_end = time.time()
    t_epoch = time.time()
    old_progress = -1

    cfg.logger.important_info('====start train====')
    for i, (data, gt) in enumerate(de_dataloader):
        # clean grad + adjust lr + put data into device + forward + backward + optimizer, return loss
        data = data.astype(np.float32)
        gt = gt.astype(np.float32)
        data = Tensor(data)
        gt = Tensor(gt)

        loss = train_net(data, gt)
        loss_meter.update(loss.asnumpy())

        # checkpoint: advance callback state and let ModelCheckpoint decide
        # whether to save this step
        if cfg.local_rank == 0:
            cb_params.cur_step_num = i + 1  # current step number
            cb_params.batch_num = i + 2
            ckpt_cb.step_end(run_context)

        # logging loss, fps, ...
        if i == 0:
            time_for_graph_compile = time.time() - create_network_start
            cfg.logger.important_info('{}, graph compile time={:.2f}s'.format(
                cfg.task, time_for_graph_compile))

        if i % cfg.log_interval == 0 and cfg.local_rank == 0:
            time_used = time.time() - t_end
            epoch = int(i / cfg.steps_per_epoch)
            fps = cfg.per_batch_size * (
                i - old_progress) * cfg.world_size / time_used
            cfg.logger.info('epoch[{}], iter[{}], {}, {:.2f} imgs/sec'.format(
                epoch, i, loss_meter, fps))
            t_end = time.time()
            loss_meter.reset()
            old_progress = i

        if i % cfg.steps_per_epoch == 0 and cfg.local_rank == 0:
            epoch_time_used = time.time() - t_epoch
            epoch = int(i / cfg.steps_per_epoch)
            fps = cfg.per_batch_size * cfg.world_size * cfg.steps_per_epoch / epoch_time_used
            cfg.logger.info(
                '=================================================')
            cfg.logger.info(
                'epoch time: epoch[{}], iter[{}], {:.2f} imgs/sec'.format(
                    epoch, i, fps))
            cfg.logger.info(
                '=================================================')
            t_epoch = time.time()

    cfg.logger.important_info('====train end====')
Example n. 6
import sys
import argparse
import logging
import math
import numpy as np
import time

# Importing Keras
from keras.optimizers import Adam
from keras import backend as K
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard
from keras.models import load_model
from src.io import checkdir
from src import loss, model, io, log
from src.bf_grid import bf_grid

logger = log.get_logger('training')
log.log2file('training')

parser = argparse.ArgumentParser(
    description='See the argument descriptions below for all available options')

parser.add_argument('-d',
                    '--data',
                    help='Input directory containing image and label',
                    required=True)

parser.add_argument('-s',
                    '--size',
                    type=int,
                    help='Input size of image to be used. [Default] = 200',
                    default=200,
Example n. 7
def init_argument():
    """init config argument."""
    parser = argparse.ArgumentParser(
        description='Face Recognition For Tracking')
    parser.add_argument('--device_target',
                        type=str,
                        choices=['Ascend', 'GPU', 'CPU'],
                        default='Ascend',
                        help='device_target')
    parser.add_argument('--is_distributed',
                        type=int,
                        default=0,
                        help='if multi device')
    parser.add_argument('--data_dir',
                        type=str,
                        default='',
                        help='image folders')
    parser.add_argument('--pretrained',
                        type=str,
                        default='',
                        help='pretrained model to load')

    args = parser.parse_args()

    graph_path = os.path.join(
        './graphs_graphmode',
        datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
    context.set_context(mode=context.GRAPH_MODE,
                        device_target=args.device_target,
                        save_graphs=True,
                        save_graphs_path=graph_path)
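    # save_graphs=True dumps the compiled computation graphs to graph_path,
    # which is useful when debugging graph-mode compilation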

    if args.device_target == 'Ascend':
        devid = int(os.getenv('DEVICE_ID'))
        context.set_context(device_id=devid)

    if args.is_distributed == 0:
        if args.device_target == 'Ascend':
            cfg = reid_1p_cfg_ascend
        else:
            cfg = reid_1p_cfg
    else:
        if args.device_target == 'Ascend':
            cfg = reid_8p_cfg_ascend
        else:
            cfg = reid_8p_cfg_gpu
    cfg.pretrained = args.pretrained
    cfg.data_dir = args.data_dir

    # Init distributed
    if args.is_distributed:
        init()
        cfg.local_rank = get_rank()
        cfg.world_size = get_group_size()
        parallel_mode = ParallelMode.DATA_PARALLEL
    else:
        parallel_mode = ParallelMode.STAND_ALONE

    # parallel_mode 'STAND_ALONE' does not support parameter_broadcast or mirror_mean
    context.set_auto_parallel_context(parallel_mode=parallel_mode,
                                      device_num=cfg.world_size,
                                      gradients_mean=True)

    mindspore.common.set_seed(1)

    # logger
    cfg.outputs_dir = os.path.join(
        cfg.ckpt_path,
        datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
    cfg.logger = get_logger(cfg.outputs_dir, cfg.local_rank)

    # Show cfg
    cfg.logger.save_args(cfg)
    return cfg, args
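
# Hedged usage sketch (an assumption, not part of the original excerpt):
# init_argument() parses the CLI flags, configures the MindSpore context,
# and hands the merged config plus raw args to a training entry point.
if __name__ == '__main__':
    cfg, args = init_argument()
    cfg.logger.info('parsed args for device {}'.format(args.device_target))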