Example #1
    def __init__(self, editor, software_version):
        self.logger = get_logger("gui")
        self.editor = editor
        self.captured_map_coords = None
        self.profile = self.editor.get_profile('')
        self.profile.aircraft = "hornet"
        self.exit_quick_capture = False
        self.values = None
        self.capturing = False
        self.capture_key = self.editor.settings.get("PREFERENCES",
                                                    "capture_key")
        self.software_version = software_version

        tesseract_path = self.editor.settings['PREFERENCES'].get(
            'tesseract_path', "tesseract")
        self.logger.info(f"Tesseract path is: {tesseract_path}")
        pytesseract.pytesseract.tesseract_cmd = tesseract_path
        try:
            self.tesseract_version = pytesseract.get_tesseract_version()
            self.capture_status = "Status: Not capturing"
            self.capture_button_disabled = False
        except pytesseract.pytesseract.TesseractNotFoundError:
            self.tesseract_version = None
            self.capture_status = "Status: Tesseract not found"
            self.capture_button_disabled = True

        self.logger.info(f"Tesseract version is: {self.tesseract_version}")
        self.window = self.create_gui()
Example #2
def network_init(args):
    devid = int(os.getenv('DEVICE_ID', '0'))
    context.set_context(mode=context.GRAPH_MODE,
                        enable_auto_mixed_precision=True,
                        device_target=args.device_target,
                        save_graphs=False,
                        device_id=devid)
    # init distributed
    if args.is_distributed:
        if args.device_target == "Ascend":
            init()
        else:
            init("nccl")
        args.rank = get_rank()
        args.group_size = get_group_size()
    # select whether only the master rank saves checkpoints or every rank does (compatible with model parallel)
    args.rank_save_ckpt_flag = 0
    if args.is_save_on_master:
        if args.rank == 0:
            args.rank_save_ckpt_flag = 1
    else:
        args.rank_save_ckpt_flag = 1
    # logger
    args.outputs_dir = os.path.join(
        args.ckpt_path,
        datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
    args.logger = get_logger(args.outputs_dir, args.rank)
    args.logger.save_args(args)
def generate_default_bases():
    default_bases_builder_logger = get_logger("default_bases_builder")

    pgdata = update_base_data(
        "https://raw.githubusercontent.com/Santi871/HornetWaypointEditor/master/data/"
        "pg.json?token=ACQW6PPI77ATCRJ2RZSDSBC44UAOG", f".\\data\\pg.json")

    caucdata = update_base_data(
        "https://raw.githubusercontent.com/Santi871/HornetWaypointEditor/master/data/"
        "cauc.json?token=ACQW6PIVKSD72T7FLOBQHCC44W334", f".\\data\\cauc.json")

    if pgdata and caucdata:
        default_bases_builder_logger.info(
            "PG and Caucasus default bases updated succesfully")
    else:
        default_bases_builder_logger.warning(
            "Failed to update PG and Caucasus default bases")

    for _, _, files in walk(".\\data"):
        for filename in files:
            if ".json" in filename:
                with open(".\\data\\" + filename, "r") as f:
                    try:
                        load_base_data(json.load(f), default_bases)
                        default_bases_builder_logger.info(
                            f"Default base data built succesfully from file: {filename}"
                        )
                    except AttributeError:
                        default_bases_builder_logger.warning(
                            f"Failed to build default base data from file: {filename}",
                            exc_info=True)
def test_get_logger_lvl(set_env, value_env, expected_lvl):

    logger = get_logger(__name__)
    if set_env:
        os.environ['LOG_LEVEL'] = value_env
    logger_lvl = get_logger_level()
    assert logger
    assert expected_lvl == logger_lvl
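
The test above only pins down the helper's contract: get_logger_level should map the LOG_LEVEL environment variable to a logging level. A minimal sketch of such a helper, assuming the standard logging level names and an INFO fallback (assumptions, not the project's actual implementation):

import logging
import os


def get_logger_level(default=logging.INFO):
    # Map the LOG_LEVEL environment variable to a logging level (sketch).
    name = os.environ.get('LOG_LEVEL', '')
    # getattr falls back to the default for unknown or empty names
    return getattr(logging, name.upper(), default) if name else default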
Example #5
    def __init__(self, settings):
        self.logger = get_logger("editor")
        self.settings = settings
        self.handler = AircraftInterface(settings)
        self.db = DatabaseInterface(settings['PREFERENCES'].get(
            "DB_Name", "profiles.db"))
        self.default_bases = default_bases
        self.wps_list = list()
        self.msns_list = list()
def first_time_setup():
    default_dcs_path = f"{str(Path.home())}\\Saved Games\\DCS.openbeta\\"

    setup_logger = get_logger("setup")
    setup_logger.info("Running first time setup...")

    gui = first_time_setup_gui()

    while True:
        event, values = gui.Read()
        if event is None:
            return False

        dcs_path = values.get("dcs_path")
        if dcs_path is not None and not dcs_path.endswith(
                "\\") and not dcs_path.endswith("/"):
            dcs_path = dcs_path + "\\"

        if event == "accept_button":
            break
        elif event == "install_button":
            try:
                install_dcs_bios(dcs_path)
                gui.Element("install_button").Update(disabled=True)
                gui.Element("accept_button").Update(disabled=False)
                gui.Element("dcs_bios").Update(value="Installed")
            except (FileExistsError, FileNotFoundError):
                gui.Element("dcs_bios").Update(value="Failed to install")
                setup_logger.error("DCS-BIOS failed to install", exc_info=True)
        elif event == "dcs_path":
            dcs_bios_detected = detect_dcs_bios(values["dcs_path"])
            if dcs_bios_detected:
                gui.Element("install_button").Update(disabled=True)
                gui.Element("accept_button").Update(disabled=False)
                gui.Element("dcs_bios").Update(value="Detected")
            else:
                gui.Element("install_button").Update(disabled=False)
                gui.Element("accept_button").Update(disabled=True)
                gui.Element("dcs_bios").Update(value="Not detected")

    config = ConfigParser()
    config.add_section("PREFERENCES")
    config.set("PREFERENCES", "grace_period", "5")
    config.set("PREFERENCES", "tesseract_path", values.get("tesseract_path"))
    config.set("PREFERENCES", "dcs_path", dcs_path or default_dcs_path)
    config.set("PREFERENCES", "db_name", "profiles_new.db")
    config.set("PREFERENCES", "capture_key",
               values.get("capture_key") or "ctrl+t")
    config.set("PREFERENCES", "log_raw_tesseract_output", "false")

    with open("settings.ini", "w+") as f:
        config.write(f)

    setup_logger.info("First time setup completed succesfully")
    gui.Close()
    return True
    def __init__(self, settings):
        self.logger = get_logger("driver")
        self.settings = settings
        self.db = DatabaseInterface(settings['PREFERENCES'].get(
            "DB_Name", "profiles.db"))
        self.default_bases = default_bases
        self.drivers = dict(hornet=HornetDriver(self.logger),
                            harrier=HarrierDriver(self.logger),
                            mirage=MirageDriver(self.logger))
        self.driver = None
Example #8
def train(params):
    """
    Tuning English model for XNLI task
    """
    # logging the results
    logger = get_logger(params, 'tune_{}_xnli.log'.format(params.src_lang))
    model = XNLINet(params)
    model = model.cuda()

    optimizer = torch.optim.Adam(
        list(model.encoder.encoder.layer.parameters()) +
        list(model.pred.parameters()),
        lr=params.lr)

    train_file = os.path.join(params.data_path, f"{params.src_lang}.train.pth")
    valid_file = os.path.join(params.data_path, f"{params.src_lang}.valid.pth")

    train_data = XNLIDataset(train_file)
    valid_data = XNLIDataset(valid_file)

    train_loader = DataLoader(train_data,
                              num_workers=4,
                              batch_size=params.batch_size,
                              shuffle=True)
    valid_loader = DataLoader(valid_data,
                              num_workers=4,
                              batch_size=params.batch_size)

    best_valid_acc = 0
    n_iter = 0
    for epoch in range(1, params.max_epoch):
        for batch in train_loader:
            inputs = truncate(batch['x'])
            output = model(inputs.cuda())
            loss = F.cross_entropy(output, batch['y'].cuda())
            loss.backward()
            if n_iter % params.grad_acc_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
            n_iter += 1
            if n_iter % 50 == 0:
                logger.info("epoch {} - iter {} | XNLI loss {:.4f}".format(
                    epoch, n_iter, loss.item()))
            if n_iter % params.epoch_size == 0:
                logger.info('run evaluation')
                val_acc = evaluate(model, valid_loader)
                logger.info(
                    "epoch {} - iter {} | XNLI validation acc {:.4f}".format(
                        epoch, n_iter, val_acc))
                if val_acc > best_valid_acc:
                    logger.info(f'save best model: {params.xnli_model}')
                    best_valid_acc = val_acc
                    torch.save(model.state_dict(), params.xnli_model)
    logger.info('=== End of epoch ===')
def test_get_logger():
    path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                        "test.log")
    os.environ['LOG_PATH'] = path
    log = get_logger(__name__)
    log.info("test")

    with open(path) as f:
        read_data = f.read()

    os.remove(path)
    assert read_data.find("test\n") > 0
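
This test implies that get_logger attaches a file handler pointing at LOG_PATH. A minimal sketch of a compatible factory, assuming a plain FileHandler and a timestamped format; the real src.logger implementation may differ:

import logging
import os


def get_logger(name):
    # Return a named logger that also writes to LOG_PATH, if that variable is set (sketch).
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    log_path = os.environ.get('LOG_PATH')
    if log_path and not logger.handlers:
        handler = logging.FileHandler(log_path)
        handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(message)s'))
        logger.addHandler(handler)
    return logger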
    def __init__(self, settings):
        self.logger = get_logger("driver")
        self.settings = settings
        self.db = DatabaseInterface(settings['PREFERENCES'].get(
            "DB_Name", "profiles.db"))
        self.default_bases = default_bases
        self.drivers = dict(hornet=HornetDriver(self.logger, settings),
                            harrier=HarrierDriver(self.logger, settings),
                            mirage=MirageDriver(self.logger, settings),
                            tomcat=TomcatDriver(self.logger, settings),
                            warthog=WarthogDriver(self.logger, settings),
                            viper=ViperDriver(self.logger, settings))
        self.driver = self.drivers["hornet"]
Example #11
    def __init__(self, editor, software_version):
        self.logger = get_logger("gui")
        self.editor = editor
        self.captured_map_coords = None
        self.profile = Profile('')
        self.profile.aircraft = "hornet"
        self.exit_quick_capture = False
        self.values = None
        self.capturing = False
        self.capture_key = try_get_setting(self.editor.settings, "capture_key",
                                           "ctrl+t")
        self.quick_capture_hotkey = try_get_setting(self.editor.settings,
                                                    "quick_capture_hotkey",
                                                    "ctrl+alt+t")
        self.enter_aircraft_hotkey = try_get_setting(self.editor.settings,
                                                     "enter_aircraft_hotkey",
                                                     "ctrl+shift+t")
        self.software_version = software_version
        self.is_focused = True
        self.scaled_dcs_gui = False
        self.selected_wp_type = "WP"

        try:
            with open(
                    f"{self.editor.settings.get('PREFERENCES', 'dcs_path')}\\Config\\options.lua",
                    "r") as f:
                dcs_settings = lua.decode(f.read().replace("options = ", ""))
                self.scaled_dcs_gui = dcs_settings["graphics"]["scaleGui"]
        except (FileNotFoundError, ValueError, TypeError):
            self.logger.error("Failed to decode DCS settings", exc_info=True)

        tesseract_path = self.editor.settings['PREFERENCES'].get(
            'tesseract_path', "tesseract")
        self.logger.info(f"Tesseract path is: {tesseract_path}")
        pytesseract.pytesseract.tesseract_cmd = tesseract_path
        try:
            self.tesseract_version = pytesseract.get_tesseract_version()
            self.capture_status = "Status: Not capturing"
            self.capture_button_disabled = False
        except pytesseract.pytesseract.TesseractNotFoundError:
            self.tesseract_version = None
            self.capture_status = "Status: Tesseract not found"
            self.capture_button_disabled = True

        self.logger.info(f"Tesseract version is: {self.tesseract_version}")
        self.window = self.create_gui()
        keyboard.add_hotkey(self.quick_capture_hotkey,
                            self.toggle_quick_capture)
        if self.enter_aircraft_hotkey != '':
            keyboard.add_hotkey(self.enter_aircraft_hotkey,
                                self.enter_coords_to_aircraft)
Example #12
def handle(ws, config_index, socket_dict):
    websocket_config = WEBSOCKET_LIST[config_index]

    logger = get_logger()
    user_id = save_ws(ws, websocket_config['func_name'], logger, socket_dict)

    while not ws.closed:
        try:
            send_msg(ws, socket_dict, websocket_config)

        except WebSocketError as e:
            logger.error(e)
            if not ws.closed:
                ws.send(websocket_config['fail_message'])

    save = socket_dict[websocket_config['func_name']][user_id]
    if save.closed:
        del socket_dict[websocket_config['func_name']][user_id]
Example #13
def test(params):
    """
    zero-shot testing of XNLI
    """
    logger = get_logger(params, f'test_{params.tgt_lang}_xnli.log')
    model = XNLINet(params)
    logger.info(f"| load: {params.xnli_model}")
    model.load_state_dict(torch.load(params.xnli_model))
    model.cuda()

    test_file = os.path.join(params.data_path, f"{params.tgt_lang}.test.pth")
    logger.info(f"| load test data: {test_file}")
    data = XNLIDataset(test_file)

    test_loader = DataLoader(data, num_workers=4, batch_size=params.batch_size)
    acc = evaluate(model, test_loader)

    logger.info(f'Zero-shot XNLI-{params.tgt_lang} accuracy: {acc:.1f}')
Example #14
"""
stego-retweet.
"""
import argparse
import getpass
import hashlib
import random

from src import logger
from src.twitter import load_words, hide, unhide, send_message, read_message

log = logger.get_logger(__name__)


def get_args() -> tuple:
    """
    Function to parse program arguments.
    """
    parser = argparse.ArgumentParser(
        description='Stego-retweet is a tool for hiding messages in Twitter using retweets. With this tool you can \
hide two chars per retweet.'
    )

    parser.add_argument(
        '-m',
        '--mode',
        help='Mode of execution, to send or receive messages.',
        choices=['send', 'recv']
    )

    parser.add_argument(
Example #15
def parse_args():
    parser = configargparse.ArgumentParser(
        description='Training Wikinet 2',
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
    # General
    general = parser.add_argument_group('General Settings.')
    general.add_argument('--my-config',
                         required=True,
                         is_config_file=True,
                         help='config file path')
    general.add_argument('--seed',
                         type=int,
                         default=-1,
                         help="Initialization seed")
    general.add_argument('--exp_name',
                         type=str,
                         default="debug",
                         help="Experiment name")
    general.add_argument("--debug",
                         type=str2bool,
                         default=True,
                         help="whether to debug")

    # Data
    data = parser.add_argument_group('Data Settings.')
    data.add_argument('--data_path', type=str, help='location of data dir')
    data.add_argument('--yamada_model', type=str, help='name of yamada model')
    data.add_argument('--data_type',
                      type=str,
                      choices=['conll', 'wiki', 'proto'],
                      help='whether to train with conll, wiki or proto')
    data.add_argument('--num_shards',
                      type=int,
                      help='number of shards of training file')
    data.add_argument('--train_size',
                      type=int,
                      help='number of training abstracts')
    data.add_argument('--mmap', type=str2bool, help='use dicts or mmaps')

    # Max Padding
    padding = parser.add_argument_group('Max Padding for batch.')
    padding.add_argument('--max_context_size',
                         type=int,
                         help='max number of context')
    padding.add_argument('--max_ent_size',
                         type=int,
                         help='max number of entities considered in abstract')
    padding.add_argument('--num_docs',
                         type=int,
                         help='max number of docs to use to create corpus vec')
    padding.add_argument('--ignore_init',
                         type=str2bool,
                         help='whether to ignore first five tokens of context')

    # Model Type
    model_selection = parser.add_argument_group('Type of model to train.')
    model_selection.add_argument('--model_name',
                                 type=str,
                                 help='name of model to train')
    model_selection.add_argument(
        '--pre_train',
        type=str,
        help='if specified, model will load state dict, must be ckpt')

    # Model params
    model_params = parser.add_argument_group("Parameters for chosen model.")
    model_params.add_argument('--dp', type=float, help='drop out')
    model_params.add_argument('--hidden_size',
                              type=int,
                              help='size of hidden layer in yamada model')

    # Candidate Generation
    candidate = parser.add_argument_group('Candidate generation.')
    candidate.add_argument('--cand_type',
                           choices=['necounts', 'pershina'],
                           help='whether to use pershina candidates')
    candidate.add_argument('--cand_gen_rand',
                           type=str2bool,
                           help='whether to generate random candidates')
    candidate.add_argument("--num_candidates",
                           type=int,
                           default=32,
                           help="Total number of candidates")
    candidate.add_argument("--prop_gen_candidates",
                           type=float,
                           default=0.5,
                           help="Proportion of candidates generated")
    candidate.add_argument("--coref",
                           type=str2bool,
                           default=False,
                           help="Whether to use coref cands")

    # Training
    training = parser.add_argument_group("Training parameters.")
    training.add_argument("--num_epochs",
                          type=int,
                          default=5,
                          help="Number of epochs")
    training.add_argument("--save_every",
                          type=int,
                          default=5,
                          help="how often to checkpoint")
    training.add_argument("--patience",
                          type=int,
                          default=5,
                          help="Patience for early stopping")
    training.add_argument("--batch_size",
                          type=int,
                          default=32,
                          help="Batch size")
    training.add_argument("--num_workers",
                          type=int,
                          default=4,
                          help="number of workers for data loader")
    training.add_argument('--lr', type=float, help='learning rate')
    training.add_argument('--wd', type=float, help='weight decay')
    training.add_argument('--embs_optim',
                          type=str,
                          choices=['adagrad', 'adam', 'rmsprop', 'sparseadam'],
                          help='optimizer for embeddings')
    training.add_argument(
        '--other_optim',
        type=str,
        choices=['adagrad', 'adam', 'rmsprop'],
        help='optimizer for parameters that are not embeddings')
    training.add_argument('--sparse', type=str2bool, help='sparse gradients')

    # Loss
    loss = parser.add_argument_group('Type of loss.')
    loss.add_argument('--loss_func',
                      type=str,
                      default='cross_entropy',
                      choices=['cross_entropy', 'cosine'],
                      help='loss function')
    loss.add_argument('--margin', type=float, help='margin of hinge loss')

    # cuda
    parser.add_argument("--device", type=str, help="cuda device")
    parser.add_argument("--use_cuda", type=str2bool, help="use gpu or not")
    parser.add_argument("--profile",
                        type=str2bool,
                        help="whether to run profiler on dataloader and exit")

    args = parser.parse_args()
    logger = get_logger(args)

    if args.wd > 0:
        assert not args.sparse

    if args.use_cuda:
        devices = args.device.split(",")
        if len(devices) > 1:
            devices = tuple([int(device) for device in devices])
        else:
            devices = int(devices[0])
        args.__dict__['device'] = devices

    logger.info("Experiment Parameters:")
    print()
    for arg in sorted(vars(args)):
        logger.info('{:<15}\t{}'.format(arg, getattr(args, arg)))

    model_date_dir = join(args.data_path, 'models',
                          '{}'.format(datetime.now().strftime("%Y_%m_%d")))
    if not os.path.exists(model_date_dir):
        os.makedirs(model_date_dir)
    model_dir = join(model_date_dir, args.exp_name)
    args.__dict__['model_dir'] = model_dir
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    return args, logger, model_dir
'''
Created on Jan 13, 2014

@author: Vincent Ketelaars
'''
import netifaces
from src.tools.network_interface import Interface, AF_INET, AF_INET6
from src.logger import get_logger
logger = get_logger(__name__)

def get_interface_addresses(version=AF_INET):
    """
    Yields Interface instances for each available AF_INET interface found.

    An Interface instance has the following properties:
    - name          (i.e. "eth0")
    - address       (i.e. "10.148.3.254")
    - netmask       (i.e. "255.255.255.0")
    - broadcast     (i.e. "10.148.3.255")
    """

    for interface in netifaces.interfaces():
        try:
            addresses = netifaces.ifaddresses(interface)
        except ValueError:
            # some reported interfaces are invalid; we encountered one called ppp0
            yield Interface(interface, None, None, None)
        else:
            if version == AF_INET:
                for option in addresses.get(netifaces.AF_INET, []):
                    try:
Example #17
    assert not params.visualize or params.evaluate
    assert not params.human_player or params.evaluate and params.visualize
    assert not params.evaluate or params.reload
    assert not params.reload or os.path.isfile(params.reload)

    # run scenario game
    module = importlib.import_module('src.' + params.scenario,
                                     package=__name__)
    module.main(parser, args)


parser = argparse.ArgumentParser(description='Arnold runner')
parser.add_argument("--main_dump_path",
                    type=str,
                    default="./dumped",
                    help="Main dump path")
parser.add_argument("--exp_name",
                    type=str,
                    default="default",
                    help="Experiment name")
args, remaining = parser.parse_known_args()

# create a directory for the experiment / create a logger
dump_path = get_dump_path(args.main_dump_path, args.exp_name)
logger = get_logger(filepath=os.path.join(dump_path, 'train.log'))
logger.info('========== Running DOOM ==========')
logger.info('Experiment will be saved in: %s' % dump_path)

# load DOOM
parse_game_args(remaining + ['--dump_path', dump_path])
Example #18
def test():
    """test method"""

    # init distributed
    if args.is_distributed:
        init()
        args.rank = get_rank()
        args.group_size = get_group_size()

    # logger
    args.outputs_dir = os.path.join(args.log_path,
                                    datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))

    args.logger = get_logger(args.outputs_dir, args.rank)

    context.reset_auto_parallel_context()
    if args.is_distributed:
        parallel_mode = ParallelMode.DATA_PARALLEL
    else:
        parallel_mode = ParallelMode.STAND_ALONE
    context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=1)

    args.logger.info('Creating Network....')
    network = YOLOV4CspDarkNet53(is_training=False)

    args.logger.info(args.pretrained)
    if os.path.isfile(args.pretrained):
        param_dict = load_checkpoint(args.pretrained)
        param_dict_new = {}
        for key, values in param_dict.items():
            if key.startswith('moments.'):
                continue
            elif key.startswith('yolo_network.'):
                param_dict_new[key[13:]] = values
            else:
                param_dict_new[key] = values
        load_param_into_net(network, param_dict_new)
        args.logger.info('load_model {} success'.format(args.pretrained))
    else:
        args.logger.info('{} does not exist or is not a pre-trained file'.format(args.pretrained))
        raise FileNotFoundError('{} does not exist or is not a pre-trained file'.format(args.pretrained))

    data_root = args.data_root
    # annFile = args.annFile

    config = ConfigYOLOV4CspDarkNet53()
    if args.testing_shape:
        config.test_img_shape = conver_testing_shape(args)

    data_txt = os.path.join(args.data_dir, 'testdev2017.txt')
    ds, data_size = create_yolo_datasetv2(data_root, data_txt=data_txt, batch_size=args.per_batch_size,
                                          max_epoch=1, device_num=args.group_size, rank=args.rank, shuffle=False,
                                          config=config)

    args.logger.info('testing shape : {}'.format(config.test_img_shape))
    args.logger.info('total {} images to eval'.format(data_size))

    network.set_train(False)

    # init detection engine
    detection = DetectionEngine(args)

    input_shape = Tensor(tuple(config.test_img_shape), ms.float32)
    args.logger.info('Start inference....')
    for i, data in enumerate(ds.create_dict_iterator()):
        image = Tensor(data["image"])

        image_shape = Tensor(data["image_shape"])
        image_id = Tensor(data["img_id"])

        prediction = network(image, input_shape)
        output_big, output_me, output_small = prediction
        output_big = output_big.asnumpy()
        output_me = output_me.asnumpy()
        output_small = output_small.asnumpy()
        image_id = image_id.asnumpy()
        image_shape = image_shape.asnumpy()

        detection.detect([output_small, output_me, output_big], args.per_batch_size, image_shape, image_id)
        if i % 1000 == 0:
            args.logger.info('Processing... {:.2f}% '.format(i * args.per_batch_size / data_size * 100))

    args.logger.info('Calculating mAP...')
    detection.do_nms_for_results()
    result_file_path = detection.write_result()
    args.logger.info('result file path: {}'.format(result_file_path))
    def __init__(self,
                 data_dict: Dict[str, Dict],
                 analysis_label: str,
                 global_lumi: Optional[float] = 139.,
                 output_dir: str = None,
                 data_dir: str = None,
                 log_level: int = 20,
                 log_out: str = 'both',
                 timedatelog: bool = True,
                 separate_loggers: bool = False,
                 **kwargs):
        """
        :param data_dict: Dictionary of dictionaries containing paths to root files and the tree to extract from each.
               The key to the top-level dictionary is the label assigned to the dataset.
        :param global_lumi: All data will be scaled to this luminosity (fb-1)
        :param output_dir: Root directory for outputs
        :param data_dir: Root directory for pickle data in/out
        :param log_level: Logging level. Default INFO. See https://docs.python.org/3/library/logging.html#logging-levels
        :param log_out: Where to set log output: 'FILE', 'CONSOLE' or 'BOTH'. (case-insensitive)
        :param timedatelog: Whether to output log filename with timedate
               (useful to turn off for testing or you'll be flooded with log files)
        :param separate_loggers: Whether each dataset should output logs to separate log files
        :param kwargs: Optional arguments to pass to all dataset builders
        """
        self.name = analysis_label
        if self.name in data_dict:
            raise SyntaxError(
                "Analysis must have different name to any dataset")

        # SET OUTPUT DIRECTORIES
        # ===========================
        if not output_dir:
            # root in the directory above this one
            output_dir = os.path.dirname(
                os.path.dirname(os.path.abspath(__file__)))
        output_dir = output_dir + '/outputs/' + analysis_label + '/'  # where outputs go
        self.paths = {
            'plot_dir': output_dir + '/plots/',  # where plots go
            'pkl_df_dir': data_dir if data_dir else output_dir + '/data/',  # pickle file directory
            'pkl_hist_dir': output_dir + '/histograms/',  # pickle file to place histograms into
            'backup_cutfiles_dir': output_dir + '/cutfiles/',  # cutfile backups
            'latex_dir': output_dir + '/LaTeX/',  # where to print latex cutflow table
            'log_dir': output_dir + '/logs/',
        }
        for path in self.paths:
            file_utils.makedir(self.paths[path])

        # LOGGING
        # ============================
        self.logger = get_logger(name=self.name,
                                 log_dir=self.paths['log_dir'],
                                 log_level=log_level,
                                 log_out=log_out,
                                 timedatelog=timedatelog)

        # SET OTHER GLOBAL OPTIONS
        # ============================
        self.name = analysis_label
        self.global_lumi = global_lumi

        # BUILD DATASETS
        # ============================
        self.datasets: Dict[str, Dataset] = dict()
        for name, data_args in data_dict.items():
            self.logger.info("")
            self.logger.info("=" * (42 + len(self.name)))
            self.logger.info(
                f"======== INITIALISING DATASET '{self.name}' =========")
            self.logger.info("=" * (42 + len(self.name)))

            if dup_args := set(data_args) & set(kwargs):
                raise SyntaxError(
                    f"Got multiple values for argument(s) {dup_args} for dataset {name}"
                )

            # get dataset build arguments out of options passed to analysis
            build_args = dict()
            for arg in [
                    'data_path', 'pkl_path', 'cutfile_path', 'cutfile',
                    'tree_dict', 'vars_to_calc', 'cuts'
            ]:
                build_args[arg] = data_args.pop(arg, None)

            # set a default pickle path if one was not passed as a build argument
            if build_args['pkl_path'] is None:
                build_args['pkl_path'] = f"{self.paths['pkl_df_dir']}{name}_df.pkl"

            # check if a pickle file already exists if not already given
            # avoids rebuilding dataset unnecessarily
            if file_utils.file_exists(build_args['pkl_path']):
                self.logger.debug(
                    f"Found pickle file at {build_args['pkl_path']}. Passing to builder"
                )

            # make dataset
            builder = DatasetBuilder(
                name=name,
                logger=(
                    self.logger  # use single logger
                    if not separate_loggers else
                    get_logger(  # if separate, make new logger for each Dataset
                        name=name,
                        log_dir=self.paths['log_dir'],
                        log_level=log_level,
                        log_out=log_out,
                        timedatelog=timedatelog)),
                **data_args,
                **kwargs)
            dataset = builder.build(**build_args)
            if separate_loggers:
                # set new logger to append to analysis logger
                dataset.logger = self.logger
                dataset.logger.debug(
                    f"{name} log handler returned to analysis.")  # test

            dataset.dsid_metadata_printout()

            dataset.set_plot_dir(self.paths['plot_dir'])
            dataset.set_pkl_path(build_args['pkl_path'])

            # save dataset no matter what
            dataset.save_pkl_file()

            self[name] = dataset  # save to analysis

            self.logger.info("=" * (42 + len(self.name)))
            self.logger.info(
                f"========= DATASET '{self.name}' INITIALISED =========")
            self.logger.info("=" * (42 + len(self.name)))
            self.logger.info("")
Example #20
from src.logger import get_logger
import ast
import src.cAST as cAST

logger = get_logger('Visitor')


class Visitor(ast.NodeVisitor):
    """Visitor class to traverse ast
    While traversing existing ast, a custom AST (cAST) is persisted.
    cAST takes in to account imports in code and their aliases in order to homogenise the ast.
    """
    def __init__(self, inbuild_imports, sys_imports):
        self.custom_ast = None
        self.inbuild_imports = inbuild_imports
        self.sys_imports = sys_imports
        self.aliases = dict()

    def set_custom_ast(self, cast):
        self.custom_ast = cast

    def get_custom_ast(self) -> cAST:
        return self.custom_ast

    def save_alias(self, alias: str, original: str):
        self.aliases.update({alias: original})

    # =============================================================================================
    # Utils Visitor
    # =============================================================================================
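
A hedged sketch of driving this visitor over a parsed module; the import lists are placeholders and the way the cAST is consumed is assumed rather than taken from the project:

import ast

source = "import numpy as np\nx = np.zeros(3)\n"
tree = ast.parse(source)

visitor = Visitor(inbuild_imports=['math', 'json'], sys_imports=['numpy'])
visitor.visit(tree)                      # standard NodeVisitor traversal
custom_ast = visitor.get_custom_ast()    # persisted cAST, if the visit_* methods built one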
Example #21
sys.path.append("..")

import bottle
from bottle import route, run, static_file, put
from bottle import request, response, redirect, error, abort
import json
from bson.objectid import ObjectId

from src.logger import get_logger
from src.constants import *
from src.utils import *

sys.path.append(CONFIG_FILE_PATH)
from config import *

log = get_logger(logFileName="webServer.log")

con = get_mongo_connection()
db = get_banking_db(con)

ROOT_PATH = "/opt/boss/web/client"

def json_friendly(obj):
    if not obj or type(obj) in (int, float, str, unicode, bool, long):
        return obj
    if type(obj) == datetime.datetime:
        return obj.strftime('%Y-%m-%dT%H:%M:%S')
    if type(obj) == dict:
        for k in obj:
            obj[k] = json_friendly(obj[k])
        return obj
Example #22
def parse_args():
    parser = configargparse.ArgumentParser(
        description='Training Wikinet 2',
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
    # General
    general = parser.add_argument_group('General Settings.')
    general.add_argument('--my-config',
                         required=True,
                         is_config_file=True,
                         help='config file path')
    general.add_argument('--seed',
                         type=int,
                         default=-1,
                         help="Initialization seed")
    general.add_argument('--exp_name',
                         type=str,
                         default="debug",
                         help="Experiment name")
    general.add_argument('--debug',
                         type=str2bool,
                         default=True,
                         help="whether to debug")
    general.add_argument('--error',
                         type=str2bool,
                         default=True,
                         help="whether to print out errors after every epoch.")

    # Data
    data = parser.add_argument_group('Data Settings.')
    data.add_argument('--data_path', type=str, help='location of data dir')
    data.add_argument('--yamada_model', type=str, help='name of yamada model')
    data.add_argument('--data_type', type=str, help='dataset to train on.')
    data.add_argument('--pre_train',
                      type=str,
                      help='if pre train, then the ckpt of model.')
    data.add_argument('--train_size',
                      type=int,
                      help='number of training abstracts')
    data.add_argument('--eval_sample',
                      type=int,
                      help='number of strs to evaluate')

    # Max Padding
    padding = parser.add_argument_group('Max Padding for batch.')
    padding.add_argument('--max_char_size',
                         type=int,
                         help='max number of words')

    # Model params
    model_params = parser.add_argument_group("Parameters for chosen model.")
    model_params.add_argument('--char_dim',
                              type=int,
                              help='dimension of char embeddings')
    model_params.add_argument('--hidden_size',
                              type=int,
                              help='latent code size')
    model_params.add_argument('--dp', type=float, help='drop out')
    model_params.add_argument('--norm',
                              type=str2bool,
                              help='whether to normalize latent code')
    model_params.add_argument('--activate',
                              type=str,
                              help='activation function after dropout')
    model_params.add_argument('--measure',
                              type=str,
                              default='ip',
                              choices=['ip', 'l2'],
                              help='faiss index')

    # Training
    train_params = parser.add_argument_group("Training parameters.")
    train_params.add_argument("--num_epochs",
                              type=int,
                              default=5,
                              help="Number of epochs")
    train_params.add_argument("--batch_size",
                              type=int,
                              default=32,
                              help="Batch size")
    train_params.add_argument('--lr', type=float, help='learning rate')
    train_params.add_argument('--wd', type=float, help='weight decay')

    # cuda and profiler
    parser.add_argument("--device", type=str, help="cuda device")
    parser.add_argument("--use_cuda", type=str2bool, help="use gpu or not")
    parser.add_argument("--profile",
                        type=str2bool,
                        help="whether to run profiler on dataloader and exit")

    args = parser.parse_args()
    logger = get_logger(args)

    # Setup
    if args.use_cuda:
        devices = args.device.split(",")
        if len(devices) > 1:
            devices = tuple([int(device) for device in devices])
        else:
            devices = int(devices[0])
        args.__dict__['device'] = devices

    logger.info("Experiment Parameters")
    for arg in sorted(vars(args)):
        logger.info('{:<15}\t{}'.format(arg, getattr(args, arg)))

    model_date_dir = join(args.data_path, 'models',
                          '{}'.format(datetime.now().strftime("%Y_%m_%d")))
    if not os.path.exists(model_date_dir):
        os.makedirs(model_date_dir)
    model_dir = join(model_date_dir, args.exp_name)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    return args, logger, model_dir
Example #23
from flask import Flask, jsonify
from src.database.common import execute
from src.database.queries import CREATE_TASK, INSPECT_TASK
from src.logger import get_logger

LOGGER_NAME = 'rest_api'
LOGGER = get_logger(LOGGER_NAME)

app = Flask(__name__)


@app.route('/tasks/<int:task_id>/')
def inspect_task(task_id):
    """ Return information about a task's status """
    task_row = execute(INSPECT_TASK, (task_id, ), logger=LOGGER)
    if not task_row:
        return jsonify('Task not found'), 404

    result = {
        'status': task_status(task_row),
        'create_time': task_row['create_time'],
        'start_time': task_row['start_time'],
        'time_to_execute': task_row['time_to_execute'],
    }
    return jsonify(result)


@app.route('/tasks/', methods=['POST'])
def create_task():
    """ Create a new task in the database and return its id """
    result = execute(CREATE_TASK, logger=LOGGER)
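
A hedged client-side sketch of exercising these two endpoints, assuming the service runs on Flask's default local port and that the POST response body carries the new task id (both assumptions):

import requests

BASE = 'http://localhost:5000'

resp = requests.post(f'{BASE}/tasks/')          # create a task
task_id = resp.json()                           # assumed to be the new task's id
status = requests.get(f'{BASE}/tasks/{task_id}/')
if status.status_code == 404:
    print('Task not found')
else:
    print(status.json())                        # status, create_time, start_time, time_to_execute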
Example #24
import argparse
import os
import vizdoom
from src.utils import get_dump_path
from src.logger import get_logger
from src.args import parse_game_args


parser = argparse.ArgumentParser(description='Arnold runner')
parser.add_argument("--main_dump_path", type=str, default="./dumped",
                    help="Main dump path")
parser.add_argument("--exp_name", type=str, default="default",
                    help="Experiment name")
args, remaining = parser.parse_known_args()
assert len(args.exp_name.strip()) > 0

# create a directory for the experiment / create a logger
dump_path = get_dump_path(args.main_dump_path, args.exp_name)
logger = get_logger(filepath=os.path.join(dump_path, 'train.log'))
logger.info('========== Running DOOM ==========')
logger.info('Experiment will be saved in: %s' % dump_path)

# load DOOM
parse_game_args(remaining + ['--dump_path', dump_path])
Example #25
import _thread as thread
import json
import threading
import time
from typing import NoReturn
import websocket

import src.cfg as cfg
import src.logger as logger
import src.commands as commands
import src.messages as messages
import src.mqtt as mqtt

log = logger.get_logger("WebSocket")
COMMAND_QUEUE = commands.get_command_queue()


def get_stove_info():
    """
    Get Stove information every `INFO_INTERVAL` seconds.
    """
    threading.Timer(cfg.INFO_INTERVAL, get_stove_info).start()
    command, value = commands.get_mcz_command("get_info"), 0
    COMMAND_QUEUE.put((command, value))


def on_open(ws: websocket.WebSocketApp) -> NoReturn:
    """
    Queue messages consumer. It sends the encoded messages to MCZ Musa Web Socket.
    """
    log.info("Successfully connected. Consuming message on MQTT queue")
def parse_args():
    """Parse train arguments."""
    parser = argparse.ArgumentParser('mindspore coco training')

    # device related
    parser.add_argument(
        '--device_target',
        type=str,
        default='Ascend',
        choices=['Ascend', 'GPU'],
        help='device where the code will be implemented. (Default: Ascend)')

    # dataset related
    parser.add_argument('--per_batch_size',
                        default=32,
                        type=int,
                        help='Batch size for Training. Default: 32.')

    # network related
    parser.add_argument('--pretrained_backbone',
                        default='',
                        type=str,
                        help='The ckpt file of CspDarkNet53. Default: "".')
    parser.add_argument(
        '--resume_yolov4',
        default='',
        type=str,
        help='The ckpt file of YOLOv4, used to fine-tune. Default: ""')

    # optimizer and lr related
    parser.add_argument(
        '--lr_scheduler',
        default='cosine_annealing',
        type=str,
        help=
        'Learning rate scheduler, options: exponential, cosine_annealing. Default: cosine_annealing'
    )
    parser.add_argument('--lr',
                        default=0.012,
                        type=float,
                        help='Learning rate. Default: 0.012')
    parser.add_argument(
        '--lr_epochs',
        type=str,
        default='220,250',
        help=
        'Epochs at which lr changes, split with ",". Default: 220,250')
    parser.add_argument(
        '--lr_gamma',
        type=float,
        default=0.1,
        help='Decrease lr by a factor of exponential lr_scheduler. Default: 0.1'
    )
    parser.add_argument(
        '--eta_min',
        type=float,
        default=0.,
        help='Eta_min in cosine_annealing scheduler. Default: 0')
    parser.add_argument(
        '--T_max',
        type=int,
        default=320,
        help='T-max in cosine_annealing scheduler. Default: 320')
    parser.add_argument('--max_epoch',
                        type=int,
                        default=320,
                        help='Max epoch num to train the model. Default: 320')
    parser.add_argument('--warmup_epochs',
                        default=20,
                        type=float,
                        help='Warmup epochs. Default: 20')
    parser.add_argument('--weight_decay',
                        type=float,
                        default=0.0005,
                        help='Weight decay factor. Default: 0.0005')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        help='Momentum. Default: 0.9')

    # loss related
    parser.add_argument('--loss_scale',
                        type=int,
                        default=64,
                        help='Static loss scale. Default: 64')
    parser.add_argument('--label_smooth',
                        type=int,
                        default=0,
                        help='Whether to use label smoothing in CE. Default: 0')
    parser.add_argument(
        '--label_smooth_factor',
        type=float,
        default=0.1,
        help='Smooth strength of original one-hot. Default: 0.1')

    # logging related
    parser.add_argument('--log_interval',
                        type=int,
                        default=100,
                        help='Logging interval steps. Default: 100')
    parser.add_argument('--ckpt_path',
                        type=str,
                        default='outputs/',
                        help='Checkpoint save location. Default: outputs/')
    parser.add_argument('--ckpt_interval',
                        type=int,
                        default=None,
                        help='Save checkpoint interval. Default: None')

    # distributed related
    parser.add_argument(
        '--is_distributed',
        type=int,
        default=1,
        help='Distributed training or not; 1 for yes, 0 for no. Default: 1')
    parser.add_argument('--rank',
                        type=int,
                        default=0,
                        help='Local rank of distributed. Default: 0')
    parser.add_argument('--group_size',
                        type=int,
                        default=1,
                        help='World size of device. Default: 1')

    # roma obs
    parser.add_argument('--data_url',
                        required=True,
                        default=None,
                        help='Location of data.')
    parser.add_argument('--train_url',
                        required=True,
                        default=None,
                        help='Location of training outputs.')

    # reset default config
    parser.add_argument('--training_shape',
                        type=str,
                        default="",
                        help='Fix training shape. Default: ""')
    parser.add_argument(
        '--resize_rate',
        type=int,
        default=10,
        help='Resize rate for multi-scale training. Default: 10')

    args, _ = parser.parse_known_args()
    if args.lr_scheduler == 'cosine_annealing' and args.max_epoch > args.T_max:
        args.T_max = args.max_epoch

    args.lr_epochs = list(map(int, args.lr_epochs.split(',')))

    # init distributed
    if args.is_distributed:
        if args.device_target == "Ascend":
            init()
        else:
            init("nccl")
        args.rank = get_rank()
        args.group_size = get_group_size()

    # logger
    args.outputs_dir = os.path.join(
        args.ckpt_path,
        datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
    args.logger = get_logger(args.outputs_dir, args.rank)
    args.logger.save_args(args)

    return args
Example #27
def test():
    """The function of eval."""
    start_time = time.time()
    args = parse_args()

    # logger
    args.outputs_dir = os.path.join(
        args.log_path,
        datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
    rank_id = int(os.environ.get('RANK_ID'))
    args.logger = get_logger(args.outputs_dir, rank_id)

    context.reset_auto_parallel_context()
    parallel_mode = ParallelMode.STAND_ALONE
    context.set_auto_parallel_context(parallel_mode=parallel_mode,
                                      gradients_mean=True,
                                      device_num=1)

    args.logger.info('Creating Network....')
    network = YOLOV3DarkNet53(is_training=False)

    config = ConfigYOLOV3DarkNet53()
    if args.testing_shape:
        config.test_img_shape = conver_testing_shape(args)

    # convert fusion network to quantization aware network
    if config.quantization_aware:
        network = quant.convert_quant_network(network,
                                              bn_fold=True,
                                              per_channel=[True, False],
                                              symmetric=[True, False])

    args.logger.info(args.pretrained)
    if os.path.isfile(args.pretrained):
        param_dict = load_checkpoint(args.pretrained)
        param_dict_new = {}
        for key, values in param_dict.items():
            if key.startswith('moments.'):
                continue
            elif key.startswith('yolo_network.'):
                param_dict_new[key[13:]] = values
            else:
                param_dict_new[key] = values
        load_param_into_net(network, param_dict_new)
        args.logger.info('load_model {} success'.format(args.pretrained))
    else:
        args.logger.info('{} does not exist or is not a pre-trained file'.format(
            args.pretrained))
        raise FileNotFoundError(
            '{} does not exist or is not a pre-trained file'.format(args.pretrained))

    data_root = args.data_root
    ann_file = args.annFile

    ds, data_size = create_yolo_dataset(data_root,
                                        ann_file,
                                        is_training=False,
                                        batch_size=args.per_batch_size,
                                        max_epoch=1,
                                        device_num=1,
                                        rank=rank_id,
                                        shuffle=False,
                                        config=config)

    args.logger.info('testing shape : {}'.format(config.test_img_shape))
    args.logger.info('total {} images to eval'.format(data_size))

    network.set_train(False)

    # init detection engine
    detection = DetectionEngine(args)

    input_shape = Tensor(tuple(config.test_img_shape), ms.float32)
    args.logger.info('Start inference....')
    for i, data in enumerate(ds.create_dict_iterator()):
        image = data["image"]

        image_shape = data["image_shape"]
        image_id = data["img_id"]

        prediction = network(image, input_shape)
        output_big, output_me, output_small = prediction
        output_big = output_big.asnumpy()
        output_me = output_me.asnumpy()
        output_small = output_small.asnumpy()
        image_id = image_id.asnumpy()
        image_shape = image_shape.asnumpy()

        detection.detect([output_small, output_me, output_big],
                         args.per_batch_size, image_shape, image_id)
        if i % 1000 == 0:
            args.logger.info('Processing... {:.2f}% '.format(
                i * args.per_batch_size / data_size * 100))

    args.logger.info('Calculating mAP...')
    detection.do_nms_for_results()
    result_file_path = detection.write_result()
    args.logger.info('result file path: {}'.format(result_file_path))
    eval_result = detection.get_eval_result()

    cost_time = time.time() - start_time
    args.logger.info('\n=============coco eval result=========\n' +
                     eval_result)
    args.logger.info('testing cost time {:.2f}h'.format(cost_time / 3600.))
Example #28
    args.rank = get_rank()
    args.group_size = get_group_size()

# select whether only the master rank saves checkpoints or every rank does (compatible with model parallel)
args.rank_save_ckpt_flag = 0
if args.is_save_on_master:
    if args.rank == 0:
        args.rank_save_ckpt_flag = 1
else:
    args.rank_save_ckpt_flag = 1

# logger
args.outputs_dir = os.path.join(
    args.ckpt_path,
    datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
args.logger = get_logger(args.outputs_dir, args.rank)
args.logger.save_args(args)


def convert_training_shape(args_training_shape):
    training_shape = [int(args_training_shape), int(args_training_shape)]
    return training_shape


class BuildTrainNetwork(nn.Cell):
    def __init__(self, network_, criterion):
        super(BuildTrainNetwork, self).__init__()
        self.network = network_
        self.criterion = criterion

    def construct(self, input_data, label):
Example #29
    def __init__(self, db_name):
        self.logger = get_logger("db")
        db.init(db_name)
        db.connect()
        db.create_tables([ProfileModel, WaypointModel, SequenceModel])
        self.logger.debug("Connected to database")
def predict():
    """The function of predict."""
    args = parse_args()

    devid = int(os.getenv('DEVICE_ID')) if os.getenv('DEVICE_ID') else 0
    context.set_context(mode=context.GRAPH_MODE,
                        device_target=args.device_target,
                        save_graphs=False,
                        device_id=devid)

    # logger
    args.outputs_dir = os.path.join(
        args.log_path,
        datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
    rank_id = int(
        os.environ.get('RANK_ID')) if os.environ.get('RANK_ID') else 0
    args.logger = get_logger(args.outputs_dir, rank_id)

    args.logger.info('Creating Network....')
    network = YOLOV3DarkNet53(is_training=False)

    if os.path.isfile(args.pretrained):
        param_dict = load_checkpoint(args.pretrained)
        param_dict_new = {}
        for key, values in param_dict.items():
            if key.startswith('moments.'):
                continue
            elif key.startswith('yolo_network.'):
                param_dict_new[key[13:]] = values
            else:
                param_dict_new[key] = values
        load_param_into_net(network, param_dict_new)
        args.logger.info('load_model {} success'.format(args.pretrained))
    else:
        args.logger.info('{} does not exist or is not a pre-trained file'.format(
            args.pretrained))
        raise FileNotFoundError(
            '{} does not exist or is not a pre-trained file'.format(args.pretrained))

    config = ConfigYOLOV3DarkNet53()
    args.logger.info('testing shape: {}'.format(config.test_img_shape))
    # data preprocess operation
    image, image_shape = data_preprocess(args.image_path, config)

    # init detection engine
    detection = DetectionEngine(args)

    input_shape = Tensor(tuple(config.test_img_shape), ms.float32)
    args.logger.info('Start inference....')
    network.set_train(False)
    prediction = network(Tensor(image.reshape(1, 3, 416, 416), ms.float32),
                         input_shape)
    output_big, output_me, output_small = prediction
    output_big = output_big.asnumpy()
    output_me = output_me.asnumpy()
    output_small = output_small.asnumpy()

    detection.detect([output_small, output_me, output_big],
                     args.per_batch_size, image_shape, config)
    detection.do_nms_for_results()
    img = detection.draw_boxes_in_image(args.image_path)

    cv2.imwrite(os.path.join(args.output_dir, 'output.jpg'), img)
from configparser import ConfigParser
from shutil import copytree
from src.gui import first_time_setup_gui, detect_dcs_bios
from src.logger import get_logger
from pathlib import Path
import PySimpleGUI as PyGUI
import tempfile
import requests
import zipfile

DCS_BIOS_VERSION = '0.7.30'
DCS_BIOS_URL = "https://github.com/DCSFlightpanels/dcs-bios/releases/download/{}/DCS-BIOS.zip"

logger = get_logger(__name__)


def install_dcs_bios(dcs_path):
    try:
        with open(dcs_path + "Scripts\\Export.lua", "r") as f:
            filestr = f.read()
    except FileNotFoundError:
        filestr = str()

    with open(dcs_path + "Scripts\\Export.lua", "a") as f:
        if "dofile(lfs.writedir()..[[Scripts\\DCS-BIOS\\BIOS.lua]])" not in filestr:
            f.write(
                "\ndofile(lfs.writedir()..[[Scripts\\DCS-BIOS\\BIOS.lua]])\n")

    try:
        with tempfile.TemporaryDirectory() as tmp_dir:
            url = DCS_BIOS_URL.format(DCS_BIOS_VERSION)
def test():
    """The function of eval."""
    args = parse_args()

    devid = int(os.getenv('DEVICE_ID')) if os.getenv('DEVICE_ID') else 0
    context.set_context(mode=context.GRAPH_MODE,
                        device_target='Ascend',
                        save_graphs=True,
                        device_id=devid)

    # logger
    args.outputs_dir = os.path.join(
        args.log_path,
        datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
    rank_id = int(
        os.environ.get('RANK_ID')) if os.environ.get('RANK_ID') else 0
    args.logger = get_logger(args.outputs_dir, rank_id)

    context.reset_auto_parallel_context()
    parallel_mode = ParallelMode.STAND_ALONE
    context.set_auto_parallel_context(parallel_mode=parallel_mode,
                                      gradients_mean=True,
                                      device_num=1)

    args.logger.info('Creating Network....')
    network = SolveOutput(YOLOV3DarkNet53(is_training=False))

    data_root = args.data_root
    ann_file = args.annFile

    args.logger.info(args.pretrained)
    if os.path.isfile(args.pretrained):
        param_dict = load_checkpoint(args.pretrained)
        param_dict_new = {}
        for key, values in param_dict.items():
            if key.startswith('moments.'):
                continue
            elif key.startswith('yolo_network.'):
                param_dict_new[key[13:]] = values
            else:
                param_dict_new[key] = values
        load_param_into_net(network, param_dict_new)
        args.logger.info('load_model {} success'.format(args.pretrained))
    else:
        args.logger.info('{} does not exist or is not a pre-trained file'.format(
            args.pretrained))
        raise FileNotFoundError(
            '{} does not exist or is not a pre-trained file'.format(args.pretrained))

    config = ConfigYOLOV3DarkNet53()
    if args.testing_shape:
        config.test_img_shape = conver_testing_shape(args)

    ds, data_size = create_yolo_dataset(data_root,
                                        ann_file,
                                        is_training=False,
                                        batch_size=1,
                                        max_epoch=1,
                                        device_num=1,
                                        rank=rank_id,
                                        shuffle=False,
                                        config=config)

    args.logger.info('testing shape : {}'.format(config.test_img_shape))
    args.logger.info('total {} images to eval'.format(data_size))

    network.set_train(False)
    # build attacker
    attack = DeepFool(network,
                      num_classes=80,
                      model_type='detection',
                      reserve_ratio=0.9,
                      bounds=(0, 1))
    input_shape = Tensor(tuple(config.test_img_shape), ms.float32)

    args.logger.info('Start inference....')
    batch_num = args.samples_num
    adv_example = []
    for i, data in enumerate(ds.create_dict_iterator(num_epochs=1)):
        if i >= batch_num:
            break
        image = data["image"]
        image_shape = data["image_shape"]

        gt_boxes, gt_logits = network(image, input_shape)
        gt_boxes, gt_logits = gt_boxes.asnumpy(), gt_logits.asnumpy()
        gt_labels = np.argmax(gt_logits, axis=2)

        adv_img = attack.generate((image.asnumpy(), image_shape.asnumpy()),
                                  (gt_boxes, gt_labels))
        adv_example.append(adv_img)
    np.save('adv_example.npy', adv_example)